@inproceedings{wiedemann-etal-2020-uhh,
title = "{UHH}-{LT} at {S}em{E}val-2020 Task 12: Fine-Tuning of Pre-Trained Transformer Networks for Offensive Language Detection",
author = "Wiedemann, Gregor and
Yimam, Seid Muhie and
Biemann, Chris",
editor = "Herbelot, Aurelie and
Zhu, Xiaodan and
Palmer, Alexis and
Schneider, Nathan and
May, Jonathan and
Shutova, Ekaterina",
booktitle = "Proceedings of the Fourteenth Workshop on Semantic Evaluation",
month = dec,
year = "2020",
address = "Barcelona (online)",
publisher = "International Committee for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2020.semeval-1.213/",
doi = "10.18653/v1/2020.semeval-1.213",
pages = "1638--1644",
abstract = "Fine-tuning of pre-trained transformer networks such as BERT yield state-of-the-art results for text classification tasks. Typically, fine-tuning is performed on task-specific training datasets in a supervised manner. One can also fine-tune in unsupervised manner beforehand by further pre-training the masked language modeling (MLM) task. Hereby, in-domain data for unsupervised MLM resembling the actual classification target dataset allows for domain adaptation of the model. In this paper, we compare current pre-trained transformer networks with and without MLM fine-tuning on their performance for offensive language detection. Our MLM fine-tuned RoBERTa-based classifier officially ranks 1st in the SemEval 2020 Shared Task 12 for the English language. Further experiments with the ALBERT model even surpass this result."
}
Markdown (Informal)
[UHH-LT at SemEval-2020 Task 12: Fine-Tuning of Pre-Trained Transformer Networks for Offensive Language Detection](https://aclanthology.org/2020.semeval-1.213/) (Wiedemann et al., SemEval 2020)
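
The abstract describes a two-stage recipe: unsupervised further pre-training with the MLM objective on in-domain text for domain adaptation, followed by supervised fine-tuning for offensive language classification. Below is a minimal sketch of that general recipe using the Hugging Face `transformers` and `datasets` libraries; it is not the authors' code, and the checkpoint name (`roberta-large`), file names, and hyperparameters are illustrative assumptions.

```python
from transformers import (
    AutoTokenizer,
    AutoModelForMaskedLM,
    AutoModelForSequenceClassification,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)
from datasets import load_dataset

model_name = "roberta-large"  # assumed checkpoint; the paper's ranked system is RoBERTa-based
tokenizer = AutoTokenizer.from_pretrained(model_name)

def tokenize(batch):
    # Fixed-length padding keeps the default collators simple in both stages.
    return tokenizer(batch["text"], truncation=True, padding="max_length", max_length=128)

# Stage 1: unsupervised MLM fine-tuning on unlabeled in-domain text (domain adaptation).
unlabeled = load_dataset("text", data_files={"train": "in_domain_tweets.txt"})  # hypothetical file
unlabeled = unlabeled.map(tokenize, batched=True, remove_columns=["text"])

mlm_model = AutoModelForMaskedLM.from_pretrained(model_name)
mlm_trainer = Trainer(
    model=mlm_model,
    args=TrainingArguments(output_dir="mlm-adapted", num_train_epochs=1,
                           per_device_train_batch_size=16),
    train_dataset=unlabeled["train"],
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm_probability=0.15),
)
mlm_trainer.train()
mlm_trainer.save_model("mlm-adapted")
tokenizer.save_pretrained("mlm-adapted")

# Stage 2: supervised fine-tuning of the domain-adapted encoder as a binary
# offensive/non-offensive classifier.
labeled = load_dataset("csv", data_files={"train": "labeled_train.csv"})  # hypothetical file with "text" and "label" columns
labeled = labeled.map(tokenize, batched=True, remove_columns=["text"])

clf_model = AutoModelForSequenceClassification.from_pretrained("mlm-adapted", num_labels=2)
clf_trainer = Trainer(
    model=clf_model,
    args=TrainingArguments(output_dir="offensive-clf", num_train_epochs=3,
                           per_device_train_batch_size=16),
    train_dataset=labeled["train"],
)
clf_trainer.train()
```

Saving the MLM-adapted weights and reloading them with a freshly initialized classification head is what carries the domain adaptation from the unsupervised stage into the supervised one.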