@inproceedings{ravaut-etal-2023-unsupervised,
title = "Unsupervised Summarization Re-ranking",
author = "Ravaut, Mathieu and
Joty, Shafiq and
Chen, Nancy F.",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-acl.529/",
doi = "10.18653/v1/2023.findings-acl.529",
pages = "8341--8376",
abstract = "With the rise of task-specific pre-training objectives, abstractive summarization models like PEGASUS offer appealing zero-shot performance on downstream summarization tasks. However, the performance of such unsupervised models still lags significantly behind their supervised counterparts. Similarly to the supervised setup, we notice a very high variance in quality among summary candidates from these models while only one candidate is kept as the summary output. In this paper, we propose to re-rank summary candidates in an unsupervised manner, aiming to close the performance gap between unsupervised and supervised models. Our approach improves the unsupervised PEGASUS by up to 7.27{\%} and ChatGPT by up to 6.86{\%} relative mean ROUGE across four widely-adopted summarization benchmarks; and achieves relative gains of 7.51{\%} (up to 23.73{\%} from XSum to WikiHow) averaged over 30 zero-shot transfer setups (finetuning on a dataset, evaluating on another)."
}
Markdown (Informal)
[Unsupervised Summarization Re-ranking](https://aclanthology.org/2023.findings-acl.529/) (Ravaut et al., Findings 2023)
ACL
- Mathieu Ravaut, Shafiq Joty, and Nancy F. Chen. 2023. Unsupervised Summarization Re-ranking. In Findings of the Association for Computational Linguistics: ACL 2023, pages 8341–8376, Toronto, Canada. Association for Computational Linguistics.
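
The abstract describes a generate-then-re-rank pipeline: decode several summary candidates from an unsupervised model, then select one without reference summaries. Below is a minimal sketch of that general idea in Python, assuming the Hugging Face `transformers` and `rouge-score` packages. The consensus score here (mean ROUGE-1 agreement with the other candidates) is a simplified stand-in, not the paper's actual SummScore feature set, and the checkpoint name and decoding settings are illustrative assumptions rather than the authors' configuration.

```python
# Hypothetical sketch of unsupervised summary re-ranking:
# 1) generate several candidates with diverse beam search,
# 2) keep the candidate that agrees most with the others.
# This is a simplified stand-in for the paper's SummScore method.
from rouge_score import rouge_scorer
from transformers import PegasusForConditionalGeneration, PegasusTokenizer

MODEL_NAME = "google/pegasus-xsum"  # illustrative checkpoint choice
tokenizer = PegasusTokenizer.from_pretrained(MODEL_NAME)
model = PegasusForConditionalGeneration.from_pretrained(MODEL_NAME)


def generate_candidates(document: str, num_candidates: int = 8) -> list[str]:
    """Decode several summary candidates via diverse beam search."""
    inputs = tokenizer(document, truncation=True, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        num_beams=num_candidates,
        num_beam_groups=num_candidates,  # one beam per group
        diversity_penalty=1.0,
        num_return_sequences=num_candidates,
    )
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)


def rerank(candidates: list[str]) -> str:
    """Pick the candidate with the highest mean ROUGE-1 F1 vs. the others."""
    scorer = rouge_scorer.RougeScorer(["rouge1"], use_stemmer=True)

    def consensus(cand: str) -> float:
        others = [c for c in candidates if c is not cand]
        return sum(
            scorer.score(o, cand)["rouge1"].fmeasure for o in others
        ) / len(others)

    return max(candidates, key=consensus)


document = "..."  # source article text goes here
print(rerank(generate_candidates(document)))
```

Diverse beam search keeps the candidate pool varied, which matters because the re-ranker can only help if the pool actually contains summaries of differing quality, exactly the high candidate variance the abstract points out.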