@inproceedings{alnuhait-etal-2025-factcheckmate,
title = "{FACTCHECKMATE}: Preemptively Detecting and Mitigating Hallucinations in {LM}s",
author = "Alnuhait, Deema and
Kirtane, Neeraja and
Khalifa, Muhammad and
Peng, Hao",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.663/",
doi = "10.18653/v1/2025.findings-emnlp.663",
pages = "12413--12428",
ISBN = "979-8-89176-335-7",
abstract = "Language models (LMs) hallucinate. We inquire: Can we detect and mitigate hallucinations before they happen? This work answers this research question in the positive, by showing that the internal representations of LMs provide rich signals that can be used for this purpose. We introduce FactCheckmate, which preemptively detects hallucinations by learning a classifier that predicts whether the LM will hallucinate, based on the model{'}s hidden states produced over the inputs, before decoding begins. If a hallucination is detected, FactCheckmate then intervenes by adjusting the LM{'}s hidden states such that the model will produce more factual outputs. FactCheckmate provides fresh insights that the inner workings of LMs can be revealed by their hidden states. Practically, both its detection and mitigation models are lightweight, adding little inference overhead; FactCheckmate proves a more efficient approach for mitigating hallucinations compared to many post-hoc alternatives. We evaluate FactCheckmate over LMs of different scales and model families (including Llama, Mistral, Qwen and Gemma), across a variety of QA datasets from different domains. Our results demonstrate the effectiveness of FactCheckmate, achieving over 70{\%} preemptive detection accuracy. On average, outputs generated by LMs with intervention are 34.4{\%} more factual compared to those without."
}
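For a concrete picture of the preemptive detection step the abstract describes (a lightweight classifier over the LM's hidden states on the input, scored before decoding begins), here is a minimal illustrative sketch. It is not the authors' released code: the model name, probe architecture, pooling choice, and threshold are all assumptions made for illustration, and the probe would need to be trained on labeled (input, hallucinated-or-not) examples before its scores mean anything.

```python
# Illustrative sketch only: probe an LM's input hidden states with a small
# classifier before any token is generated. Model, probe design, layer/pooling
# choice, and threshold are assumptions, not the paper's implementation.
import torch
import torch.nn as nn
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen2.5-0.5B"  # any causal LM; small model assumed for illustration
tok = AutoTokenizer.from_pretrained(model_name)
lm = AutoModelForCausalLM.from_pretrained(model_name)
lm.eval()


class HallucinationProbe(nn.Module):
    """Small MLP mapping a pooled hidden state to P(hallucination)."""

    def __init__(self, hidden_size: int):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(hidden_size, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
        )

    def forward(self, h: torch.Tensor) -> torch.Tensor:  # h: (batch, hidden_size)
        return torch.sigmoid(self.net(h)).squeeze(-1)


# Untrained here; in practice fit on hidden states labeled by output factuality.
probe = HallucinationProbe(lm.config.hidden_size)


@torch.no_grad()
def will_hallucinate(question: str, threshold: float = 0.5) -> bool:
    """Score the input's hidden states before decoding starts."""
    enc = tok(question, return_tensors="pt")
    out = lm(**enc, output_hidden_states=True)
    # Mean-pool the last layer's hidden states over input tokens (one simple choice).
    pooled = out.hidden_states[-1].mean(dim=1)  # (1, hidden_size)
    return probe(pooled).item() > threshold
```

In the paper's framing, a positive prediction would then trigger a mitigation step that adjusts the hidden states toward more factual outputs; that intervention is not sketched here.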