@inproceedings{kumar-etal-2020-data,
title = "Data Augmentation using Pre-trained Transformer Models",
author = "Kumar, Varun and
Choudhary, Ashutosh and
Cho, Eunah",
editor = "Campbell, William M. and
Waibel, Alex and
Hakkani-Tur, Dilek and
Hazen, Timothy J. and
Kilgour, Kevin and
Cho, Eunah and
Kumar, Varun and
Glaude, Hadrien",
booktitle = "Proceedings of the 2nd Workshop on Life-long Learning for Spoken Language Systems",
month = dec,
year = "2020",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Add-Cong-Liu-Florida-Atlantic-University-author-id/2020.lifelongnlp-1.3/",
doi = "10.18653/v1/2020.lifelongnlp-1.3",
pages = "18--26",
abstract = "Language model based pre-trained models such as BERT have provided significant gains across different NLP tasks. In this paper, we study different types of transformer based pre-trained models such as auto-regressive models (GPT-2), auto-encoder models (BERT), and seq2seq models (BART) for conditional data augmentation. We show that prepending the class labels to text sequences provides a simple yet effective way to condition the pre-trained models for data augmentation. Additionally, on three classification benchmarks, pre-trained Seq2Seq model outperforms other data augmentation methods in a low-resource setting. Further, we explore how different pre-trained model based data augmentation differs in-terms of data diversity, and how well such methods preserve the class-label information."
}
Markdown (Informal)
[Data Augmentation using Pre-trained Transformer Models](https://aclanthology.org/2020.lifelongnlp-1.3/) (Kumar et al., lifelongnlp 2020)
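
The label-prepending idea described in the abstract can be sketched as follows. This is a minimal illustration using the Hugging Face `transformers` library, not the authors' released code: in the paper the generator is first fine-tuned on label-prepended training sentences, whereas this snippet only shows the conditioning format at generation time; the separator choice and sampling settings are illustrative assumptions.

```python
# Minimal sketch of label-conditioned generation with GPT-2 for data
# augmentation, assuming the Hugging Face `transformers` library.
# NOTE: the paper fine-tunes the model on "label SEP text EOS" sequences
# before generating; here we only illustrate the prompt format.
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

# Condition generation by prepending the class label to a short prompt.
# "positive" and the prompt text are hypothetical examples.
label, prompt = "positive", "the movie was"
inputs = tokenizer(f"{label} {tokenizer.eos_token} {prompt}", return_tensors="pt")

outputs = model.generate(
    **inputs,
    max_new_tokens=30,
    do_sample=True,        # sampling gives more diverse augmented examples
    top_p=0.9,
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```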