@inproceedings{sun-etal-2023-teaching,
title = "Teaching the Pre-trained Model to Generate Simple Texts for Text Simplification",
author = "Sun, Renliang and
Xu, Wei and
Wan, Xiaojun",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.findings-acl.595/",
doi = "10.18653/v1/2023.findings-acl.595",
pages = "9345--9355",
abstract = "Randomly masking text spans in ordinary texts in the pre-training stage hardly allows models to acquire the ability to generate simple texts. It can hurt the performance of pre-trained models on text simplification tasks. In this paper, we propose a new continued pre-training strategy to teach the pre-trained model to generate simple texts. We continue pre-training BART, a representative model, to obtain SimpleBART. It consistently and significantly improves the results on lexical simplification, sentence simplification, and document-level simplification tasks over BART. At the end, we compare SimpleBART with several representative large language models (LLMs)."
}