@inproceedings{li-chu-2024-continually,
title = "Can We Continually Edit Language Models? On the Knowledge Attenuation in Sequential Model Editing",
author = "Li, Qi and
Chu, Xiaowen",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2024",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2024.findings-acl.323/",
doi = "10.18653/v1/2024.findings-acl.323",
pages = "5438--5455",
abstract = "Model editing has become a promising method for precisely and effectively updating knowledge in language models. In this paper, we investigate knowledge attenuation, in which the retention of updated knowledge within the language model decreases as the number of edits increases after sequential editing. Through empirical study, we discovered that existing editing methods generally suffer from knowledge attenuation. We attribute this phenomenon to two aspects: (1) redundant parameters interference and (2) update weight disentanglement. To this end, we propose the AdaPLE method. It not only mitigates the knowledge attenuation issue but also improves the performance on existing benchmarks. To the best of our knowledge, we are the first to investigate the cause and mitigation of knowledge attenuation in sequential LLM editing."
}