@inproceedings{prasad-etal-2022-gjg-tamilnlp,
    title     = {{GJG}@{T}amil{NLP}-{ACL}2022: Using Transformers for Abusive Comment Classification in {T}amil},
    author    = {Prasad, Gaurang and
                 Prasad, Janvi and
                 C, Gunavathi},
    editor    = {Chakravarthi, Bharathi Raja and
                 Priyadharshini, Ruba and
                 Madasamy, Anand Kumar and
                 Krishnamurthy, Parameswari and
                 Sherly, Elizabeth and
                 Mahesan, Sinnathamby},
    booktitle = {Proceedings of the Second Workshop on Speech and Language Technologies for Dravidian Languages},
    month     = may,
    year      = {2022},
    address   = {Dublin, Ireland},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2022.dravidianlangtech-1.15/},
    doi       = {10.18653/v1/2022.dravidianlangtech-1.15},
    pages     = {93--99},
    abstract  = {This paper presents transformer-based models for the ``Abusive Comment Detection'' shared task at the Second Workshop on Speech and Language Technologies for Dravidian Languages at ACL 2022. Our team participated in both the multi-class classification sub-tasks as a part of this shared task. The dataset for sub-task A was in Tamil text; while B was code-mixed Tamil-English text. Both the datasets contained 8 classes of abusive comments. We trained an XLM-RoBERTa and DeBERTA base model on the training splits for each sub-task. For sub-task A, the XLM-RoBERTa model achieved an accuracy of 0.66 and the DeBERTa model achieved an accuracy of 0.62. For sub-task B, both the models achieved a classification accuracy of 0.72; however, the DeBERTa model performed better in other classification metrics. Our team ranked 2nd in the code-mixed classification sub-task and 8th in Tamil-text sub-task.},
}
Markdown (Informal)
[GJG@TamilNLP-ACL2022: Using Transformers for Abusive Comment Classification in Tamil](https://aclanthology.org/2022.dravidianlangtech-1.15/) (Prasad et al., DravidianLangTech 2022)
ACL