@inproceedings{kale-rastogi-2020-text,
title = "Text-to-Text Pre-Training for Data-to-Text Tasks",
author = "Kale, Mihir and
Rastogi, Abhinav",
editor = "Davis, Brian and
Graham, Yvette and
Kelleher, John and
Sripada, Yaji",
booktitle = "Proceedings of the 13th International Conference on Natural Language Generation",
month = dec,
year = "2020",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2020.inlg-1.14/",
doi = "10.18653/v1/2020.inlg-1.14",
pages = "97--102",
abstract = "We study the pre-train + fine-tune strategy for data-to-text tasks. Our experiments indicate that text-to-text pre-training in the form of T5 (Raffel et al., 2019), enables simple, end-to-end transformer based models to outperform pipelined neural architectures tailored for data-to-text generation, as well as alternatives such as BERT and GPT-2. Importantly, T5 pre-training leads to better generalization, as evidenced by large improvements on out-ofdomain test sets. We hope our work serves as a useful baseline for future research, as transfer learning becomes ever more prevalent for data-to-text tasks."
}
Markdown (Informal)
[Text-to-Text Pre-Training for Data-to-Text Tasks](https://aclanthology.org/2020.inlg-1.14/) (Kale & Rastogi, INLG 2020)

ACL
Mihir Kale and Abhinav Rastogi. 2020. Text-to-Text Pre-Training for Data-to-Text Tasks. In Proceedings of the 13th International Conference on Natural Language Generation, pages 97–102, Dublin, Ireland. Association for Computational Linguistics.
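The abstract frames data-to-text as plain sequence-to-sequence fine-tuning of a pre-trained T5 checkpoint. A minimal sketch of that recipe, assuming the Hugging Face `transformers` API and a made-up E2E-style linearized input (this is not the authors' released code; the example pair, checkpoint, and learning rate are illustrative assumptions):

```python
# Rough sketch of the pre-train + fine-tune recipe: take a pre-trained T5
# checkpoint and fine-tune it end-to-end on (linearized structured input, text)
# pairs. Dataset loading, batching, and evaluation are omitted.
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-small")
model = T5ForConditionalGeneration.from_pretrained("t5-small")

# Hypothetical E2E-style example: structured record linearized into a flat string.
source = "name[Aromi] eatType[coffee shop] area[city centre]"
target = "Aromi is a coffee shop in the city centre."

inputs = tokenizer(source, return_tensors="pt")
labels = tokenizer(target, return_tensors="pt").input_ids

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

# One fine-tuning step: standard seq2seq cross-entropy loss on the target text.
model.train()
loss = model(**inputs, labels=labels).loss
loss.backward()
optimizer.step()
optimizer.zero_grad()

# Inference is a plain encoder-decoder decode from the linearized input.
model.eval()
generated = model.generate(**inputs, max_length=64)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```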