@inproceedings{bagheri-nezhad-etal-2025-fair,
title = "Fair Summarization: Bridging Quality and Diversity in Extractive Summaries",
author = "Bagheri Nezhad, Sina and
Bandyapadhyay, Sayan and
Agrawal, Ameeta",
editor = "Prabhakaran, Vinodkumar and
Dev, Sunipa and
Benotti, Luciana and
Hershcovich, Daniel and
Cao, Yong and
Zhou, Li and
Cabello, Laura and
Adebara, Ife",
booktitle = "Proceedings of the 3rd Workshop on Cross-Cultural Considerations in NLP (C3NLP 2025)",
month = may,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.c3nlp-1.3/",
pages = "22--34",
ISBN = "979-8-89176-237-4",
    abstract = "Fairness in multi-document summarization of user-generated content remains a critical challenge in natural language processing (NLP). Existing summarization methods often fail to ensure equitable representation across different social groups, leading to biased outputs. In this paper, we introduce two novel methods for fair extractive summarization: FairExtract, a clustering-based approach, and FairGPT, which leverages GPT-3.5-turbo with fairness constraints. We evaluate these methods using the Divsumm summarization dataset of White-aligned, Hispanic, and African-American dialect tweets and compare them against relevant baselines. The results obtained using a comprehensive set of summarization quality metrics such as SUPERT, BLANC, SummaQA, BARTScore, and UniEval, as well as a fairness metric F, demonstrate that FairExtract and FairGPT achieve superior fairness while maintaining competitive summarization quality. Additionally, we introduce composite metrics (e.g., SUPERT+F, BLANC+F) that integrate quality and fairness into a single evaluation framework, offering a more nuanced understanding of the trade-offs between these objectives. Our code is available online."
}
Markdown (Informal)
[Fair Summarization: Bridging Quality and Diversity in Extractive Summaries](https://preview.aclanthology.org/landing_page/2025.c3nlp-1.3/) (Bagheri Nezhad et al., C3NLP 2025)