@inproceedings{nguyen-etal-2020-bertweet,
title = "{BERT}weet: A pre-trained language model for {E}nglish Tweets",
author = "Nguyen, Dat Quoc and
Vu, Thanh and
Tuan Nguyen, Anh",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
month = oct,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.emnlp-demos.2",
doi = "10.18653/v1/2020.emnlp-demos.2",
pages = "9--14",
abstract = "We present BERTweet, the first public large-scale pre-trained language model for English Tweets. Our BERTweet, having the same architecture as BERT-base (Devlin et al., 2019), is trained using the RoBERTa pre-training procedure (Liu et al., 2019). Experiments show that BERTweet outperforms strong baselines RoBERTa-base and XLM-R-base (Conneau et al., 2020), producing better performance results than the previous state-of-the-art models on three Tweet NLP tasks: Part-of-speech tagging, Named-entity recognition and text classification. We release BERTweet under the MIT License to facilitate future research and applications on Tweet data. Our BERTweet is available at https://github.com/VinAIResearch/BERTweet",
}