@inproceedings{kadotani-etal-2025-learning,
title = "Learning from Hallucinations: Mitigating Hallucinations in {LLM}s via Internal Representation Intervention",
author = "Kadotani, Sora and
Nishida, Kosuke and
Nishida, Kyosuke",
editor = "Inui, Kentaro and
Sakti, Sakriani and
Wang, Haofen and
Wong, Derek F. and
Bhattacharyya, Pushpak and
Banerjee, Biplab and
Ekbal, Asif and
Chakraborty, Tanmoy and
Singh, Dhirendra Pratap",
booktitle = "Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "The Asian Federation of Natural Language Processing and The Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.findings-ijcnlp.68/",
pages = "1136--1143",
ISBN = "979-8-89176-303-6",
abstract = "Large language models (LLMs) sometimes hallucinate facts. Recent studies have shown that use of non-factual LLMs (anti-expert) have the potential to improve the factuality of the base LLM. Anti-expert methods penalize the output probabilities of the base LLM with an anti-expert LLM. Anti-expert methods are effective in mitigating hallucinations, but require high computational costs because the two LLMs are run simultaneously. In this paper, we propose an efficient anti-expert method called in-model anti-expert. It mitigated the hallucination problem with a single LLM and intervening to change the internal representations in the direction of improving factuality. Experiments results showed that the proposed method is less costly than the conventional anti-expert method and outperformed existing methods except for the anti-expert method. We confirmed that the proposed method improved GPU memory usage from 2.2x to 1.2x and latency from 1.9x to 1.2x."
}