@inproceedings{fu-etal-2023-inverse,
title = "Inverse Reinforcement Learning for Text Summarization",
author = "Fu, Yu and
Xiong, Deyi and
Dong, Yue",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.findings-emnlp.436/",
doi = "10.18653/v1/2023.findings-emnlp.436",
pages = "6559--6570",
abstract = "We introduce inverse reinforcement learning (IRL) as an effective paradigm for training abstractive summarization models, imitating human summarization behaviors. Our IRL model estimates the reward function using a suite of important sub-rewards for summarization and concurrently optimizes the policy network. Experimental results across datasets in different domains (CNN/DailyMail and WikiHow) and various model sizes (BART-base and BART-large) demonstrate the superiority of our proposed IRL model for summarization over MLE and RL baselines. The resulting summaries exhibit greater similarity to human-crafted gold references, outperforming MLE and RL baselines on metrics such as ROUGE, coverage, novelty, compression ratio, factuality, and human evaluations."
}