@inproceedings{mishra-etal-2020-multilingual,
title = "Multilingual Joint Fine-tuning of Transformer models for identifying Trolling, Aggression and Cyberbullying at {TRAC} 2020",
author = "Mishra, Sudhanshu and
Prasad, Shivangi and
Mishra, Shubhanshu",
editor = "Kumar, Ritesh and
Ojha, Atul Kr. and
Lahiri, Bornini and
Zampieri, Marcos and
Malmasi, Shervin and
Murdock, Vanessa and
Kadar, Daniel",
booktitle = "Proceedings of the Second Workshop on Trolling, Aggression and Cyberbullying",
month = may,
year = "2020",
address = "Marseille, France",
publisher = "European Language Resources Association (ELRA)",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2020.trac-1.19/",
pages = "120--125",
language = "eng",
ISBN = "979-10-95546-56-6",
abstract = "We present our team {\textquoteleft}3Idiots' (referred as {\textquoteleft}sdhanshu' in the official rankings) approach for the Trolling, Aggression and Cyberbullying (TRAC) 2020 shared tasks. Our approach relies on fine-tuning various Transformer models on the different datasets. We also investigated the utility of task label marginalization, joint label classification, and joint training on multilingual datasets as possible improvements to our models. Our team came second in English sub-task A, a close fourth in the English sub-task B and third in the remaining 4 sub-tasks. We find the multilingual joint training approach to be the best trade-off between computational efficiency of model deployment and model`s evaluation performance. We open source our approach at \url{https://github.com/socialmediaie/TRAC2020}."
}