@inproceedings{yu-etal-2024-mechanistic,
title = "Mechanistic Understanding and Mitigation of Language Model Non-Factual Hallucinations",
author = "Yu, Lei and
Cao, Meng and
Cheung, Jackie CK and
Dong, Yue",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.findings-emnlp.466/",
doi = "10.18653/v1/2024.findings-emnlp.466",
pages = "7943--7956",
abstract = "State-of-the-art language models (LMs) sometimes generate that misalign with world knowledge. To explore the mechanistic causes of these hallucinations, we create diagnostic datasets with subject-relation queries and adapt interpretability methods to trace hallucinations through internal model representations. We discover two general and distinct mechanistic causes of hallucinations shared across LMs (Llama-2, Pythia, GPT-J): 1) : insufficient subject attribute knowledge in lower layer MLPs, and 2) : failure to select the correct object attribute in upper layer attention heads. We also found these two internal mechanistic causes of hallucinations are reflected in external manifestations. Based on insights from our mechanistic analysis, we propose a novel hallucination mitigation method through targeted restoration of the LM`s internal fact recall pipeline, demonstrating superior performance compared to baselines."
}