@inproceedings{qiao-etal-2025-seqmmr,
    title     = "{S}eq{MMR}: Sequential Model Merging and {LLM} Routing for Enhanced Batched Sequential Knowledge Editing",
    author    = "Qiao, Shanbao and
      Liu, Xuebing and
      Gupta, Akshat and
      Na, Seung-Hoon",
    editor    = "Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
    month     = jul,
    year      = "2025",
    address   = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2025.findings-acl.870/",
    doi       = "10.18653/v1/2025.findings-acl.870",
    pages     = "16932--16947",
    isbn      = "979-8-89176-256-5",
    abstract  = "Model knowledge editing enables the efficient correction of erroneous information and the continuous updating of outdated knowledge within language models. While existing research has demonstrated strong performance in single-instance or few-instance sequential editing and one-time massive editing scenarios, the batched sequential editing paradigm remains a significant challenge. The primary issue lies in the model{'}s tendency to gradually forget previously edited knowledge and become increasingly unstable after multiple iterations of batched editing. To address these challenges, we propose SeqMMR, an enhanced framework for batched sequential knowledge editing that leverages Sequential Model Merging and a model Router. Our approach iteratively merges parameters from current batch-edited models with those of their predecessors, ensuring that newly emerging knowledge is integrated while mitigating the forgetting of previously edited knowledge. Furthermore, the model router directs queries unrelated to the edited knowledge to an unedited model backup, preventing unintended alterations in model predictions. Extensive experiments across various datasets demonstrate that our approach effectively mitigates knowledge forgetting, improves performance across all previous batches, and better preserves the model{'}s general capabilities."
}
@comment{
  Markdown (informal) citation:
  [SeqMMR: Sequential Model Merging and LLM Routing for Enhanced Batched Sequential Knowledge Editing](https://aclanthology.org/2025.findings-acl.870/) (Qiao et al., Findings of ACL 2025)
}