@inproceedings{prasad-etal-2022-gjg,
    title     = {{GJG}@{TamilNLP}-{ACL}2022: Emotion Analysis and Classification in {Tamil} using Transformers},
    author    = {Prasad, Janvi and
                 Prasad, Gaurang and
                 C, Gunavathi},
    editor    = {Chakravarthi, Bharathi Raja and
                 Priyadharshini, Ruba and
                 Madasamy, Anand Kumar and
                 Krishnamurthy, Parameswari and
                 Sherly, Elizabeth and
                 Mahesan, Sinnathamby},
    booktitle = {Proceedings of the Second Workshop on Speech and Language Technologies for Dravidian Languages},
    month     = may,
    year      = {2022},
    address   = {Dublin, Ireland},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2022.dravidianlangtech-1.14/},
    doi       = {10.18653/v1/2022.dravidianlangtech-1.14},
    pages     = {86--92},
    abstract  = {This paper describes the systems built by our team for the ``Emotion Analysis in Tamil'' shared task at the Second Workshop on Speech and Language Technologies for Dravidian Languages at ACL 2022. There were two multi-class classification sub-tasks as a part of this shared task. The dataset for sub-task A contained 11 types of emotions while sub-task B was more fine-grained with 31 emotions. We fine-tuned an XLM-RoBERTa and DeBERTA base model for each sub-task. For sub-task A, the XLM-RoBERTa model achieved an accuracy of 0.46 and the DeBERTa model achieved an accuracy of 0.45. We had the best classification performance out of 11 teams for sub-task A. For sub-task B, the XLM-RoBERTa model{'}s accuracy was 0.33 and the DeBERTa model had an accuracy of 0.26. We ranked 2nd out of 7 teams for sub-task B.},
}
Markdown (Informal)
[GJG@TamilNLP-ACL2022: Emotion Analysis and Classification in Tamil using Transformers](https://aclanthology.org/2022.dravidianlangtech-1.14/) (Prasad et al., DravidianLangTech 2022)
ACL