@inproceedings{cao-etal-2020-factual,
title = "Factual Error Correction for Abstractive Summarization Models",
author = "Cao, Meng and
Dong, Yue and
Wu, Jiapeng and
Cheung, Jackie Chi Kit",
editor = "Webber, Bonnie and
Cohn, Trevor and
He, Yulan and
Liu, Yang",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2020.emnlp-main.506/",
doi = "10.18653/v1/2020.emnlp-main.506",
pages = "6251--6258",
abstract = "Neural abstractive summarization systems have achieved promising progress, thanks to the availability of large-scale datasets and models pre-trained with self-supervised methods. However, ensuring the factual consistency of the generated summaries for abstractive summarization systems is a challenge. We propose a post-editing corrector module to address this issue by identifying and correcting factual errors in generated summaries. The neural corrector model is pre-trained on artificial examples that are created by applying a series of heuristic transformations on reference summaries. These transformations are inspired by the error analysis of state-of-the-art summarization model outputs. Experimental results show that our model is able to correct factual errors in summaries generated by other neural summarization models and outperforms previous models on factual consistency evaluation on the CNN/DailyMail dataset. We also find that transferring from artificial error correction to downstream settings is still very challenging."
}