@inproceedings{paraschiv-etal-2021-upb,
title = "{UPB} at {S}em{E}val-2021 Task 5: Virtual Adversarial Training for Toxic Spans Detection",
author = "Paraschiv, Andrei and
Cercel, Dumitru-Clementin and
Dascalu, Mihai",
editor = "Palmer, Alexis and
Schneider, Nathan and
Schluter, Natalie and
Emerson, Guy and
Herbelot, Aurelie and
Zhu, Xiaodan",
booktitle = "Proceedings of the 15th International Workshop on Semantic Evaluation (SemEval-2021)",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2021.semeval-1.26/",
doi = "10.18653/v1/2021.semeval-1.26",
pages = "225--232",
abstract = "The real-world impact of polarization and toxicity in the online sphere marked the end of 2020 and the beginning of this year in a negative way. Semeval-2021, Task 5 - Toxic Spans Detection is based on a novel annotation of a subset of the Jigsaw Unintended Bias dataset and is the first language toxicity detection task dedicated to identifying the toxicity-level spans. For this task, participants had to automatically detect character spans in short comments that render the message as toxic. Our model considers applying Virtual Adversarial Training in a semi-supervised setting during the fine-tuning process of several Transformer-based models (i.e., BERT and RoBERTa), in combination with Conditional Random Fields. Our approach leads to performance improvements and more robust models, enabling us to achieve an F1-score of 65.73{\%} in the official submission and an F1-score of 66.13{\%} after further tuning during post-evaluation."
}