@inproceedings{li-etal-2025-topic,
title = "Topic-Guided Reinforcement Learning with {LLM}s for Enhancing Multi-Document Summarization",
author = "Li, Chuyuan and
Xu, Austin and
Joty, Shafiq and
Carenini, Giuseppe",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Ros{\'e}, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/name-variant-enfa-fane/2025.findings-emnlp.662/",
doi = "10.18653/v1/2025.findings-emnlp.662",
pages = "12395--12412",
isbn = "979-8-89176-335-7",
abstract = "A key challenge in Multi-Document Summarization (MDS) is effectively integrating information from multiple sources while maintaining coherence and topical relevance. While Large Language Models (LLMs) have shown impressive results in single-document summarization, their performance on MDS still leaves room for improvement. In this paper, we propose a topic-guided reinforcement learning approach to improve content selection in MDS. We first show that explicitly prompting models with topic labels enhances the informativeness of the generated summaries. Building on this insight, we propose a novel topic reward within the Group Relative Policy Optimization (GRPO) framework to measure topic alignment between the generated summary and the source documents. Experimental results on the Multi-News and Multi-XScience datasets demonstrate that our method consistently outperforms strong baselines, highlighting the effectiveness of leveraging topical cues in MDS."
}
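As a rough illustration of the idea the abstract describes (a topic-alignment reward scored against the source documents, plugged into GRPO's group-relative advantage computation), here is a minimal Python sketch. It is not the authors' implementation: extract_topics, the Jaccard overlap, and the toy inputs are hypothetical stand-ins; the paper derives topic labels via LLM prompting and defines its own reward.

```python
# Hypothetical sketch of a topic-alignment reward inside a GRPO-style
# group-relative update. All helpers below are illustrative stand-ins.
from typing import List, Set


def extract_topics(text: str) -> Set[str]:
    """Toy topic extractor: lowercased content words longer than 5 chars.
    (The paper prompts an LLM for topic labels; this is only a placeholder.)"""
    return {w.strip(".,;:").lower() for w in text.split() if len(w) > 5}


def topic_reward(summary: str, sources: List[str]) -> float:
    """Jaccard overlap between summary topics and pooled source-document
    topics, used here as a stand-in topic-alignment score in [0, 1]."""
    summary_topics = extract_topics(summary)
    source_topics = set().union(*(extract_topics(d) for d in sources))
    if not summary_topics or not source_topics:
        return 0.0
    return len(summary_topics & source_topics) / len(summary_topics | source_topics)


def grpo_advantages(rewards: List[float]) -> List[float]:
    """Core GRPO idea: normalize each sampled summary's reward against the
    mean and standard deviation of its own sampling group."""
    mean = sum(rewards) / len(rewards)
    std = (sum((r - mean) ** 2 for r in rewards) / len(rewards)) ** 0.5 or 1.0
    return [(r - mean) / std for r in rewards]


if __name__ == "__main__":
    sources = [
        "Wildfires spread across northern California forests overnight.",
        "Evacuation orders issued as firefighters battle growing blazes.",
    ]
    # A group of sampled candidate summaries for the same source cluster.
    group = [
        "Firefighters battle spreading wildfires; evacuation orders issued.",
        "A new smartphone model was released this week.",
    ]
    rewards = [topic_reward(s, sources) for s in group]
    print(rewards, grpo_advantages(rewards))
```

Under these assumptions, the on-topic candidate receives a positive group-relative advantage and the off-topic one a negative advantage, which is the signal a GRPO-style policy update would use to steer content selection toward the sources' topics.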