@inproceedings{lin-cheng-2022-r,
title = "{R}-{T}ea{F}or: Regularized Teacher-Forcing for Abstractive Summarization",
author = "Lin, Guan-Yu and
Cheng, Pu-Jen",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.emnlp-main.423/",
doi = "10.18653/v1/2022.emnlp-main.423",
pages = "6303--6311",
    abstract = "Teacher-forcing is widely used in training sequence generation models to improve sampling efficiency and to stabilize training. However, teacher-forcing is vulnerable to the exposure bias problem. Previous works have attempted to address exposure bias by modifying the training data to simulate model-generated results. Nevertheless, they do not consider the pairwise relationship between the original training data and the modified ones, which provides more information during training. Hence, we propose Regularized Teacher-Forcing (R-TeaFor) to utilize this relationship for better regularization. Empirically, our experiments show that R-TeaFor outperforms previous state-of-the-art summarization models, and the results can be generalized to different pre-trained models."
}
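
To give a rough sense of the training objective the abstract describes, here is a minimal sketch, not the authors' implementation: it pairs standard teacher-forcing cross-entropy with a hypothetical pairwise consistency (KL) term between predictions made from the gold target prefix and from a perturbed, partially model-generated prefix. The `model(src, decoder_input=...)` interface, the perturbation rate, the pad id, and the weight `alpha` are all assumptions made for illustration.

```python
# Minimal sketch (not the authors' code): teacher-forcing cross-entropy plus a
# hypothetical pairwise consistency term between the decoder's distributions on
# the original target prefix and on a perturbed ("model-simulated") one.
import torch
import torch.nn.functional as F

def regularized_teacher_forcing_loss(model, src, tgt, alpha=1.0):
    """Cross-entropy under teacher-forcing, plus a KL term that ties together
    predictions from the original and a perturbed target prefix (assumed setup)."""
    # Standard teacher-forcing: feed the gold prefix, predict the next token.
    logits_gold = model(src, decoder_input=tgt[:, :-1])          # (B, T, V); assumed interface
    ce = F.cross_entropy(
        logits_gold.reshape(-1, logits_gold.size(-1)),
        tgt[:, 1:].reshape(-1),
        ignore_index=0,                                          # assumed pad id
    )

    # Simulate exposure-bias conditions: replace some gold prefix tokens with
    # model predictions (scheduled-sampling-style perturbation; rate assumed).
    with torch.no_grad():
        sampled = logits_gold.argmax(dim=-1)
    mask = torch.rand_like(tgt[:, :-1], dtype=torch.float) < 0.25
    perturbed_prefix = torch.where(mask, sampled, tgt[:, :-1])

    # Pairwise regularization: encourage the two prediction distributions to agree.
    logits_pert = model(src, decoder_input=perturbed_prefix)
    kl = F.kl_div(
        F.log_softmax(logits_pert, dim=-1),
        F.softmax(logits_gold.detach(), dim=-1),
        reduction="batchmean",
    )
    return ce + alpha * kl
```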