@inproceedings{tran-kruschwitz-2021-ur,
title = "ur-iw-hnt at {G}erm{E}val 2021: An Ensembling Strategy with Multiple {BERT} Models",
author = "Tran, Hoai Nam and
Kruschwitz, Udo",
editor = "Risch, Julian and
Stoll, Anke and
Wilms, Lena and
Wiegand, Michael",
booktitle = "Proceedings of the GermEval 2021 Shared Task on the Identification of Toxic, Engaging, and Fact-Claiming Comments",
month = sep,
year = "2021",
address = "Duesseldorf, Germany",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2021.germeval-1.12/",
pages = "83--87",
abstract = "This paper describes our approach (ur-iw-hnt) for the Shared Task of GermEval2021 to identify toxic, engaging, and fact-claiming comments. We submitted three runs using an ensembling strategy by majority (hard) voting with multiple different BERT models of three different types: German-based, Twitter-based, and multilingual models. All ensemble models outperform single models, while BERTweet is the winner of all individual models in every subtask. Twitter-based models perform better than GermanBERT models, and multilingual models perform worse but by a small margin."
}
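
As a rough illustration of the hard-voting ensemble described in the abstract, here is a minimal Python sketch using Hugging Face `transformers`. The checkpoint names, the binary `num_labels=2` head, and the helper functions are illustrative assumptions, not the paper's exact setup; in practice each member would be a model fine-tuned on the shared-task data.

```python
# Minimal sketch of hard (majority) voting over multiple BERT-style
# classifiers. Checkpoint names below are illustrative placeholders,
# not the exact models used in the paper.
from collections import Counter

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Hypothetical ensemble members, one per model family named in the abstract.
MODEL_NAMES = [
    "deepset/gbert-base",            # German-based (assumed)
    "vinai/bertweet-base",           # Twitter-based (assumed)
    "bert-base-multilingual-cased",  # multilingual (assumed)
]


def predict_label(model, tokenizer, text: str) -> int:
    """Return the argmax class index for a single comment."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    return int(logits.argmax(dim=-1).item())


def ensemble_predict(text: str, members) -> int:
    """Hard voting: each model casts one vote; the majority label wins."""
    votes = [predict_label(model, tokenizer, text) for model, tokenizer in members]
    return Counter(votes).most_common(1)[0][0]


if __name__ == "__main__":
    members = []
    for name in MODEL_NAMES:
        tokenizer = AutoTokenizer.from_pretrained(name)
        # num_labels=2 assumes a binary subtask (e.g. toxic vs. not toxic);
        # untuned heads give arbitrary predictions until fine-tuned.
        model = AutoModelForSequenceClassification.from_pretrained(name, num_labels=2)
        model.eval()
        members.append((model, tokenizer))

    print(ensemble_predict("Das ist ein Beispielkommentar.", members))
```

Note that with three voters and binary labels, majority voting always produces a strict winner; an even-sized ensemble would need an explicit tie-break rule.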