@inproceedings{wan-etal-2025-positional,
title = "On Positional Bias of Faithfulness for Long-form Summarization",
author = "Wan, David and
Vig, Jesse and
Bansal, Mohit and
Joty, Shafiq",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.naacl-long.442/",
pages = "8791--8810",
ISBN = "979-8-89176-189-6",
abstract = "Large Language Models (LLMs) often exhibit positional bias in long-context settings, under-attending to information in the middle of inputs. We investigate the presence of this bias in long-form summarization, its impact on faithfulness, and various techniques to mitigate this bias. To consistently evaluate faithfulness, we first compile a benchmark of eight human-annotated long-form summarization datasets and perform a meta-evaluation of faithfulness metrics. We show that LLM-based faithfulness metrics, though effective with full-context inputs, remain sensitive to document order, indicating positional bias. Analyzing LLM-generated summaries across six datasets, we find a ``U-shaped'' trend in faithfulness, where LLMs faithfully summarize the beginning and end of documents but neglect middle content. Perturbing document order similarly reveals models are less faithful when important documents are placed in the middle of the input. We find that this behavior is partly due to shifting focus with context length: as context increases, summaries become less faithful, but beyond a certain length, faithfulness improves as the model focuses on the end. Finally, we experiment with different generation techniques to reduce positional bias and find that prompting techniques effectively direct model attention to specific positions, whereas more sophisticated approaches offer limited improvements. Our data and code will be publicly available."
}
Markdown (Informal)
[On Positional Bias of Faithfulness for Long-form Summarization](https://aclanthology.org/2025.naacl-long.442/) (Wan et al., NAACL 2025)
ACL
David Wan, Jesse Vig, Mohit Bansal, and Shafiq Joty. 2025. On Positional Bias of Faithfulness for Long-form Summarization. In Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 8791–8810, Albuquerque, New Mexico. Association for Computational Linguistics.