@inproceedings{janiszewski-etal-2020-pum,
    title = "{PUM} at {S}em{E}val-2020 Task 12: Aggregation of Transformer-based Models' Features for Offensive Language Recognition",
    author = "Janiszewski, Piotr and
      Skiba, Mateusz and
      Wali{\'n}ska, Urszula",
    editor = "Herbelot, Aurelie and
      Zhu, Xiaodan and
      Palmer, Alexis and
      Schneider, Nathan and
      May, Jonathan and
      Shutova, Ekaterina",
    booktitle = "Proceedings of the Fourteenth Workshop on Semantic Evaluation",
    month = dec,
    year = "2020",
    address = "Barcelona (online)",
    publisher = "International Committee for Computational Linguistics",
    url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2020.semeval-1.210/",
    doi = "10.18653/v1/2020.semeval-1.210",
    pages = "1615--1621",
    abstract = "In this paper, we describe the PUM team's entry to the SemEval-2020 Task 12. Creating our solution involved leveraging two well-known pretrained models used in natural language processing: BERT and XLNet, which achieve state-of-the-art results in multiple NLP tasks. The models were fine-tuned for each subtask separately and features taken from their hidden layers were combined and fed into a fully connected neural network. The model using aggregated Transformer features can serve as a powerful tool for offensive language identification problem. Our team was ranked 7th out of 40 in Sub-task C - Offense target identification with 64.727{\%} macro F1-score and 64th out of 85 in Sub-task A - Offensive language identification (89.726{\%} F1-score)."
}
Markdown (Informal)
[PUM at SemEval-2020 Task 12: Aggregation of Transformer-based Models’ Features for Offensive Language Recognition](https://preview.aclanthology.org/add-emnlp-2024-awards/2020.semeval-1.210/) (Janiszewski et al., SemEval 2020)
ACL