@inproceedings{cohen-etal-2025-infact,
  title     = {{InFact}: Informativeness Alignment for Improved {LLM} Factuality},
  author    = {Cohen, Roi and Biswas, Russa and de Melo, Gerard},
  editor    = {Christodoulopoulos, Christos and Chakraborty, Tanmoy and Rose, Carolyn and Peng, Violet},
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2025},
  month     = nov,
  year      = {2025},
  address   = {Suzhou, China},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.findings-emnlp.971/},
  doi       = {10.18653/v1/2025.findings-emnlp.971},
  pages     = {17876--17888},
  isbn      = {979-8-89176-335-7},
  abstract  = {Factual completeness is a general term that captures how detailed and informative a factually correct text is. For instance, the factual sentence ``Barack Obama was born in the United States'' is factually correct, though less informative than the factual sentence ``Barack Obama was born in Honolulu, Hawaii, United States''. Despite the known fact that LLMs tend to hallucinate and generate factually incorrect text, they might also tend to choose to generate factual text that is indeed factually correct and yet less informative than other, more informative choices. In this work, we tackle this problem by proposing an informativeness alignment mechanism. This mechanism takes advantage of recent factual informativeness benchmarks to propose an informativeness alignment objective. This objective prioritizes answers that are both correct and informative. We find that when training a model to maximize this objective or optimize its preference, we can improve not just informativeness but also factuality.},
}
@comment{Informal (Markdown) citation:
[InFact: Informativeness Alignment for Improved LLM Factuality](https://aclanthology.org/2025.findings-emnlp.971/) (Cohen et al., Findings 2025)
ACL
}