@inproceedings{pamies-etal-2020-lt,
    title     = {{LT}@{H}elsinki at {S}em{E}val-2020 Task 12: Multilingual or Language-specific {BERT}?},
    author    = {P{\`a}mies, Marc and
                 {\"O}hman, Emily and
                 Kajava, Kaisla and
                 Tiedemann, J{\"o}rg},
    editor    = {Herbelot, Aurelie and
                 Zhu, Xiaodan and
                 Palmer, Alexis and
                 Schneider, Nathan and
                 May, Jonathan and
                 Shutova, Ekaterina},
    booktitle = {Proceedings of the Fourteenth Workshop on Semantic Evaluation},
    month     = dec,
    year      = {2020},
    address   = {Barcelona (online)},
    publisher = {International Committee for Computational Linguistics},
    url       = {https://aclanthology.org/2020.semeval-1.205/},
    doi       = {10.18653/v1/2020.semeval-1.205},
    pages     = {1569--1575},
    abstract  = {This paper presents the different models submitted by the LT@Helsinki team for the SemEval 2020 Shared Task 12. Our team participated in sub-tasks A and C; titled offensive language identification and offense target identification, respectively. In both cases we used the so-called Bidirectional Encoder Representation from Transformer (BERT), a model pre-trained by Google and fine-tuned by us on the OLID and SOLID datasets. The results show that offensive tweet classification is one of several language-based tasks where BERT can achieve state-of-the-art results.},
}
@comment{
Markdown (Informal):
[LT@Helsinki at SemEval-2020 Task 12: Multilingual or Language-specific BERT?](https://aclanthology.org/2020.semeval-1.205/) (Pàmies et al., SemEval 2020)
ACL
}