@inproceedings{ma-etal-2024-prom,
title = "{PROM}: A Phrase-level Copying Mechanism with Pre-training for Abstractive Summarization",
author = "Ma, Xinbei and
Gong, Yeyun and
He, Pengcheng and
Zhao, Hai and
Duan, Nan",
editor = "Calzolari, Nicoletta and
Kan, Min-Yen and
Hoste, Veronique and
Lenci, Alessandro and
Sakti, Sakriani and
Xue, Nianwen",
booktitle = "Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)",
month = may,
year = "2024",
address = "Torino, Italia",
publisher = "ELRA and ICCL",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2024.lrec-main.1148/",
pages = "13103--13119",
abstract = "Based on the remarkable achievements of pre-trained language models in abstractive summarization, the copying mechanism has proved helpful by improving the factuality, stability, and overall performance. This work proposes PROM, a new PhRase-level cOpying Mechanism that enhances attention on n-grams, which can be applied to zero-shot summarization with pre-training. PROM adds an indicator layer to explicitly pick up tokens in n-gram that can be copied from the source, and calculates an auxiliary loss for the copying prediction. Empirical studies show that PROM makes significant improvements in fine-tuning on benchmarks. In the zero-shot setting, PROM is utilized in the self-supervised pre-training on raw corpora and provides new general baselines on a wide range of summarization datasets. Further analysis shows that PROM performs more reasonable copying and contributes to faithfulness. Our code is publicly available at https://github.com/xbmxb/PROM."
}