@inproceedings{takeshita-etal-2025-irsum,
    title     = {{IRS}um: One Model to Rule Summarization and Retrieval},
    author    = {Takeshita, Sotaro and
                 Ponzetto, Simone Paolo and
                 Eckert, Kai},
    editor    = {Dhole, Kaustubh and
                 Clinciu, Miruna},
    booktitle = {Proceedings of the Fourth Workshop on Generation, Evaluation and Metrics (GEM{\texttwosuperior})},
    month     = jul,
    year      = {2025},
    address   = {Vienna, Austria and virtual meeting},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.gem-1.23/},
    pages     = {262--275},
    isbn      = {979-8-89176-261-9},
    abstract  = {Applications that store a large number of documents often have summarization and retrieval functionalities to help users digest large amounts of information efficiently. Currently, such systems need to run two task-specific models, for summarization and retrieval, redundantly on the same set of documents. An efficient approach to amend this redundancy would be to reuse hidden representations produced during the summary generation for retrieval. However, our experiment shows that existing models, including recent large language models, do not produce retrieval-friendly embeddings during summarization due to a lack of a contrastive objective during their training. To this end, we introduce a simple, cost-effective training strategy which integrates a contrastive objective into standard summarization training without requiring additional annotations. We empirically show that our model can perform on par or even outperform in some cases compared to the combination of two task-specific models while improving throughput and FLOPs by up to 17{\%} and 20{\%}, respectively.}
}
Markdown (Informal)
[IRSum: One Model to Rule Summarization and Retrieval](https://aclanthology.org/2025.gem-1.23/) (Takeshita et al., GEM 2025)
ACL
- Sotaro Takeshita, Simone Paolo Ponzetto, and Kai Eckert. 2025. IRSum: One Model to Rule Summarization and Retrieval. In Proceedings of the Fourth Workshop on Generation, Evaluation and Metrics (GEM²), pages 262–275, Vienna, Austria and virtual meeting. Association for Computational Linguistics.