@inproceedings{lai-etal-2021-thank,
title = "Thank you {BART}! Rewarding Pre-Trained Models Improves Formality Style Transfer",
author = "Lai, Huiyuan and
Toral, Antonio and
Nissim, Malvina",
editor = "Zong, Chengqing and
Xia, Fei and
Li, Wenjie and
Navigli, Roberto",
booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2021.acl-short.62/",
doi = "10.18653/v1/2021.acl-short.62",
pages = "484--494",
abstract = "Scarcity of parallel data causes formality style transfer models to have scarce success in preserving content. We show that fine-tuning pre-trained language (GPT-2) and sequence-to-sequence (BART) models boosts content preservation, and that this is possible even with limited amounts of parallel data. Augmenting these models with rewards that target style and content {--}the two core aspects of the task{--} we achieve a new state-of-the-art."
}
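
The abstract describes supervised fine-tuning of BART on informal-formal sentence pairs before augmenting training with style and content rewards. Below is a minimal, illustrative sketch of that fine-tuning step only; it is not the authors' code, it assumes the Hugging Face transformers and PyTorch libraries, and the checkpoint name and toy sentence pairs are placeholders rather than data from the paper. The reward component is not shown.

```python
# Illustrative sketch only -- not the authors' implementation. Assumes the
# Hugging Face transformers and PyTorch libraries; the checkpoint name and
# the toy informal/formal pairs below are placeholders, not data from the paper.
import torch
from transformers import BartForConditionalGeneration, BartTokenizer

model_name = "facebook/bart-base"
tokenizer = BartTokenizer.from_pretrained(model_name)
model = BartForConditionalGeneration.from_pretrained(model_name)

# Toy parallel data: (informal source, formal target).
pairs = [
    ("gotta go, talk later", "I have to leave now; we can speak later."),
    ("thx so much 4 ur help", "Thank you very much for your help."),
]

optimizer = torch.optim.AdamW(model.parameters(), lr=3e-5)
model.train()
for informal, formal in pairs:
    inputs = tokenizer(informal, return_tensors="pt")
    labels = tokenizer(formal, return_tensors="pt").input_ids
    # Standard cross-entropy loss against the formal target tokens.
    loss = model(**inputs, labels=labels).loss
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

# After fine-tuning, generate a formal rewrite of an informal input.
model.eval()
with torch.no_grad():
    ids = tokenizer("gonna be late, sry", return_tensors="pt").input_ids
    out = model.generate(ids, num_beams=4, max_length=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```
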
Markdown (Informal)
[Thank you BART! Rewarding Pre-Trained Models Improves Formality Style Transfer](https://preview.aclanthology.org/fix-sig-urls/2021.acl-short.62/) (Lai et al., ACL-IJCNLP 2021)
ACL
Huiyuan Lai, Antonio Toral, and Malvina Nissim. 2021. Thank you BART! Rewarding Pre-Trained Models Improves Formality Style Transfer. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 484–494, Online. Association for Computational Linguistics.