@inproceedings{mrini-etal-2021-rewards,
    title     = {Rewards with Negative Examples for Reinforced Topic-Focused Abstractive Summarization},
    author    = {Mrini, Khalil and
                 Liu, Can and
                 Dreyer, Markus},
    editor    = {Carenini, Giuseppe and
                 Cheung, Jackie Chi Kit and
                 Dong, Yue and
                 Liu, Fei and
                 Wang, Lu},
    booktitle = {Proceedings of the Third Workshop on New Frontiers in Summarization},
    month     = nov,
    year      = {2021},
    address   = {Online and Punta Cana, Dominican Republic},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2021.newsum-1.4/},
    doi       = {10.18653/v1/2021.newsum-1.4},
    pages     = {33--38},
    abstract  = {We consider the problem of topic-focused abstractive summarization, where the goal is to generate an abstractive summary focused on a particular topic, a phrase of one or multiple words. We hypothesize that the task of generating topic-focused summaries can be improved by showing the model what it must not focus on. We introduce a deep reinforcement learning approach to topic-focused abstractive summarization, trained on rewards with a novel negative example baseline. We define the input in this problem as the source text preceded by the topic. We adapt the CNN-Daily Mail and New York Times summarization datasets for this task. We then show through experiments on existing rewards that the use of a negative example baseline can outperform the use of a self-critical baseline, in Rouge, BERTScore, and human evaluation metrics.},
}
Markdown (Informal)
[Rewards with Negative Examples for Reinforced Topic-Focused Abstractive Summarization](https://aclanthology.org/2021.newsum-1.4/) (Mrini et al., NewSum 2021)
ACL