@inproceedings{rozner-etal-2024-knowledge,
  title     = {Knowledge Editing in Language Models via Adapted Direct Preference Optimization},
  author    = {Rozner, Amit and
               Battash, Barak and
               Wolf, Lior and
               Lindenbaum, Ofir},
  editor    = {Al-Onaizan, Yaser and
               Bansal, Mohit and
               Chen, Yun-Nung},
  booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2024},
  month     = nov,
  year      = {2024},
  address   = {Miami, Florida, USA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.findings-emnlp.273/},
  doi       = {10.18653/v1/2024.findings-emnlp.273},
  pages     = {4761--4774},
  abstract  = {Large Language Models (LLMs) can become outdated over time as they may lack updated world knowledge, leading to factual knowledge errors and gaps. Knowledge Editing (KE) aims to overcome this challenge using weight updates that do not require expensive retraining. We propose treating KE as an LLM alignment problem. Toward this goal, we introduce Knowledge Direct Preference Optimization (KDPO), a variation of the Direct Preference Optimization (DPO) that is more effective for knowledge modifications. Our method is based on an online approach that continually updates the knowledge stored in the model. We use the current knowledge as a negative sample and the new knowledge we want to introduce as a positive sample in a process called DPO. We also use teacher-forcing for negative sample generation and optimize using the positive sample, which helps maintain localized changes. We tested our KE method on various datasets and models, comparing it to several cutting-edge methods, with 100 and 500 sequential edits. Additionally, we conducted an ablation study comparing our method to the standard DPO approach. Our experimental results show that our modified DPO method allows for more refined KE, achieving similar or better performance compared to previous methods.},
}
Markdown (Informal)
[Knowledge Editing in Language Models via Adapted Direct Preference Optimization](https://aclanthology.org/2024.findings-emnlp.273/) (Rozner et al., Findings 2024)
ACL