@comment{ACL Anthology entry 2021.dravidianlangtech-1.40 (Yang, DravidianLangTech @ EACL 2021).
  Cleaned from the auto-export: canonical aclanthology.org URL (the original pointed at a
  temporary "preview"/ingestion build), brace field delimiters instead of quotes, whole-word
  brace protection in the title, and aligned fields. All bibliographic data unchanged.}
@inproceedings{yang-2021-maoqin,
  title     = {Maoqin @ {DravidianLangTech}-{EACL}2021: The Application of Transformer-Based Model},
  author    = {Yang, Maoqin},
  editor    = {Chakravarthi, Bharathi Raja and
               Priyadharshini, Ruba and
               Kumar M, Anand and
               Krishnamurthy, Parameswari and
               Sherly, Elizabeth},
  booktitle = {Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages},
  month     = apr,
  year      = {2021},
  address   = {Kyiv},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.dravidianlangtech-1.40/},
  pages     = {281--286},
  abstract  = {This paper describes the result of team-Maoqin at DravidianLangTech-EACL2021. The provided task consists of three languages(Tamil, Malayalam, and Kannada), I only participate in one of the language task-Malayalam. The goal of this task is to identify offensive language content of the code-mixed dataset of comments/posts in Dravidian Languages (Tamil-English, Malayalam-English, and Kannada-English) collected from social media. This is a classification task at the comment/post level. Given a Youtube comment, systems have to classify it into Not-offensive, Offensive-untargeted, Offensive-targeted-individual, Offensive-targeted-group, Offensive-targeted-other, or Not-in-indented-language. I use the transformer-based language model with BiGRU-Attention to complete this task. To prove the validity of the model, I also use some other neural network models for comparison. And finally, the team ranks 5th in this task with a weighted average F1 score of 0.93 on the private leader board.},
}
Markdown (Informal)
[Maoqin @ DravidianLangTech-EACL2021: The Application of Transformer-Based Model](https://aclanthology.org/2021.dravidianlangtech-1.40/) (Yang, DravidianLangTech 2021)
ACL