@inproceedings{gao-etal-2020-supert,
    title     = {{SUPERT}: Towards New Frontiers in Unsupervised Evaluation Metrics for Multi-Document Summarization},
    author    = {Gao, Yang and
                 Zhao, Wei and
                 Eger, Steffen},
    editor    = {Jurafsky, Dan and
                 Chai, Joyce and
                 Schluter, Natalie and
                 Tetreault, Joel},
    booktitle = {Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
    month     = jul,
    year      = {2020},
    address   = {Online},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2020.acl-main.124/},
    doi       = {10.18653/v1/2020.acl-main.124},
    pages     = {1347--1354},
    abstract  = {We study unsupervised multi-document summarization evaluation metrics, which require neither human-written reference summaries nor human annotations (e.g. preferences, ratings, etc.). We propose SUPERT, which rates the quality of a summary by measuring its semantic similarity with a pseudo reference summary, i.e. selected salient sentences from the source documents, using contextualized embeddings and soft token alignment techniques. Compared to the state-of-the-art unsupervised evaluation metrics, SUPERT correlates better with human ratings by 18--39{\%}. Furthermore, we use SUPERT as rewards to guide a neural-based reinforcement learning summarizer, yielding favorable performance compared to the state-of-the-art unsupervised summarizers. All source code is available at \url{https://github.com/yg211/acl20-ref-free-eval}.}
}
Markdown (Informal)
[SUPERT: Towards New Frontiers in Unsupervised Evaluation Metrics for Multi-Document Summarization](https://aclanthology.org/2020.acl-main.124/) (Gao et al., ACL 2020)
ACL