@inproceedings{li-etal-2025-hallucana,
title = "{HALLUCANA}: Fixing {LLM} Hallucination with A Canary Lookahead",
author = "Li, Tianyi and
Dayanik, Erenay and
Tyagi, Shubhi and
Pierleoni, Andrea",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Ingest-2025-COMPUTEL/2025.findings-naacl.12/",
pages = "213--230",
ISBN = "979-8-89176-195-7",
abstract = "In this paper, we present HALLUCANA, a canary lookahead to detect and correct factual hallucinations of Large Language Models (LLMs) in long-form generation. HALLUCANA detects and intervenes as soon as traces of hallucination emerge, during and even before generation. To support timely detection, we exploit the internal factuality representation in the LLM hidden space, where we investigate various proxies to the LLMs' factuality self-assessment, and discuss its relation to the models' context familiarity from their pre-training. On biography generation, our method improves generation quality by up to 2.5x, while consuming over 6 times less compute."
}