@inproceedings{wu-etal-2025-robust,
title = "Robust Knowledge Editing via Explicit Reasoning Chains for Distractor-Resilient Multi-Hop {QA}",
author = "Wu, Yuchen and
Ding, Liang and
Shen, Li and
Tao, Dacheng",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.786/",
doi = "10.18653/v1/2025.findings-emnlp.786",
pages = "14578--14586",
ISBN = "979-8-89176-335-7",
abstract = "Large language models (LLMs) encode vast amounts of world knowledge but remain static once trained, making timely integration of emerging facts prohibitively expensive via full retraining. Knowledge-editing techniques have thus emerged to inject or overwrite specific facts into LLMs, yet they either over-rely on superficial cues or incur complex, iterative pipelines that collapse under noisy, multi-hop conditions. We introduce **Reason-KE**, an end-to-end reasoning-chain-based editing framework that steers a pretrained LLM through four structured stages{---}fact acknowledgment, relevance determination, selective application, and final reasoning{---}to filter distractors in a single pass. Trained on MQuAKE-CF with up to four irrelevant facts, Reason-KE elevates Qwen2.5-7B{'}s multi-hop QA accuracy to 90.2{\%} ({\textuparrow}17.6 pp) while suffering merely 6.3{\%} drop under heavy distraction and {\ensuremath{<}}1{\%} when answers are leaked. Our quantitative analysis confirms Reason-KE{'}s resilience and efficiency, establishing a new state of the art for reliable LLM knowledge updates. The code will be released."
}