@inproceedings{li-yu-2025-summary,
title = "Summary Factual Inconsistency Detection Based on {LLM}s Enhanced by Universal Information Extraction",
author = "Li, Anguo and
Yu, Lei",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.findings-acl.1305/",
pages = "25450--25465",
ISBN = "979-8-89176-256-5",
abstract = "Automatic text summarization has a potential flaw that affects the factuality of summaries. Recently, Large Language Models (LLMs) have been introduced as detectors for factual inconsistencies in summaries. However, LLM-based methods rely on reasoning capabilities and face challenges in terms of efficiency and explainability. We focus on decoupling LLMs' information extraction and reasoning capabilities to address prominent challenges, and propose a novel framework, UIEFID (Universal Information Extraction-enhanced Factual Inconsistency Detection). Our idea is to define a self-adaptive structured schema to guide fine-tuned LLMs in extracting unified structured information from documents and summaries, ultimately detecting the origins of inconsistencies in extraction information. The evaluation on 5 open-source models shows that UIEFID not only enhances the detection accuracy on the AGGREFACT benchmark but also significantly reduces redundant reasoning."
}