@inproceedings{hong-lipani-2024-interpretability,
title = "Interpretability-based Tailored Knowledge Editing in Transformers",
author = "Hong, Yihuai and
Lipani, Aldo",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.emnlp-main.225/",
doi = "10.18653/v1/2024.emnlp-main.225",
pages = "3847--3858",
abstract = "Language models recognized as a new form of knowledge bases, face challenges of outdated, erroneous, and privacy-sensitive information, necessitating knowledge editing to rectify errors without costly retraining. Existing methods, spanning model`s parameters modification, external knowledge integration, and in-context learning, lack in-depth analysis from a model interpretability perspective. Our work explores the instability in in-context learning outcomes, providing insights into its reasons and distinctions from other methods. Leveraging findings on the critical role of feed-forward MLPs in decoder-only models, we propose a tailored knowledge editing method, TailoredKE, that considers the unique information flow of each sample. Model interpretability reveals diverse attribute recall across transformer layers, guiding edits to specific features at different depths and mitigating over-editing issues."
}
Markdown (Informal)
[Interpretability-based Tailored Knowledge Editing in Transformers](https://aclanthology.org/2024.emnlp-main.225/) (Hong & Lipani, EMNLP 2024)
ACL
Yihuai Hong and Aldo Lipani. 2024. Interpretability-based Tailored Knowledge Editing in Transformers. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 3847–3858, Miami, Florida, USA. Association for Computational Linguistics.