@inproceedings{chen-etal-2025-explainable,
title = "Explainable Hallucination through Natural Language Inference Mapping",
author = "Chen, Wei-Fan and
Zhao, Zhixue and
Karimi, Akbar and
Flek, Lucie",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.findings-acl.96/",
pages = "1888--1896",
ISBN = "979-8-89176-256-5",
abstract = "Large language models (LLMs) often generate hallucinated content, making it crucial to identify and quantify inconsistencies in their outputs. We introduce HaluMap, a post-hoc framework that detects hallucinations by mapping entailment and contradiction relations between source inputs and generated outputs using a natural language inference (NLI) model. To improve reliability, we propose a calibration step leveraging intra-text relations to refine predictions. HaluMap outperforms state-of-the-art NLI-based methods by five percentage points compared to other training-free approaches, while providing clear, interpretable explanations. As a training-free and model-agnostic approach, HaluMap offers a practical solution for verifying LLM outputs across diverse NLP tasks. The resources of this paper are available at https://github.com/caisa-lab/acl25-halumap."
}
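
The abstract describes a post-hoc, training-free pipeline: label every (source sentence, generated sentence) pair with an NLI relation and use the entailment/contradiction map to flag and explain hallucinated content. The sketch below is a minimal illustration of that general idea, not the authors' HaluMap implementation: the NLI model name (`roberta-large-mnli`), the pairwise scoring, and the simple "no source sentence entails it" flagging rule are assumptions for demonstration, and the paper's intra-text calibration step is omitted.

```python
# Minimal sketch of NLI-based relation mapping between source and output
# sentences (an assumption-laden illustration, not the HaluMap code).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

MODEL_NAME = "roberta-large-mnli"  # assumption: any off-the-shelf NLI model
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME).eval()

def nli_relation(premise: str, hypothesis: str) -> tuple[str, float]:
    """Return (label, probability) for one premise/hypothesis pair."""
    enc = tokenizer(premise, hypothesis, return_tensors="pt", truncation=True)
    with torch.no_grad():
        probs = model(**enc).logits.softmax(dim=-1).squeeze(0)
    idx = int(probs.argmax())
    return model.config.id2label[idx], float(probs[idx])

def relation_map(source_sents: list[str], output_sents: list[str]) -> dict:
    """Label every (source, output) sentence pair with its NLI relation."""
    return {
        (i, j): nli_relation(src, out)
        for i, src in enumerate(source_sents)
        for j, out in enumerate(output_sents)
    }

def unsupported_sentences(source_sents: list[str], output_sents: list[str]):
    """Output sentences entailed by no source sentence (hallucination candidates)."""
    rel = relation_map(source_sents, output_sents)
    return [
        out for j, out in enumerate(output_sents)
        if not any(rel[(i, j)][0] == "ENTAILMENT"
                   for i in range(len(source_sents)))
    ]
```

The pairwise relation map doubles as the explanation: for each flagged output sentence one can point to the source sentences it contradicts or fails to be entailed by, which is the interpretability claim the abstract makes.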