@inproceedings{jayanthi-gupta-2021-sj,
title = "{SJ}{\_}{AJ}@{D}ravidian{L}ang{T}ech-{EACL}2021: Task-Adaptive Pre-Training of Multilingual {BERT} models for Offensive Language Identification",
author = "Jayanthi, Sai Muralidhar and
Gupta, Akshat",
editor = "Chakravarthi, Bharathi Raja and
Priyadharshini, Ruba and
Kumar M, Anand and
Krishnamurthy, Parameswari and
Sherly, Elizabeth",
booktitle = "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages",
month = apr,
year = "2021",
address = "Kyiv",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2021.dravidianlangtech-1.44/",
pages = "307--312",
abstract = "In this paper we present our submission for the EACL 2021-Shared Task on Offensive Language Identification in Dravidian languages. Our final system is an ensemble of mBERT and XLM-RoBERTa models which leverage task-adaptive pre-training of multilingual BERT models with a masked language modeling objective. Our system was ranked 1st for Kannada, 2nd for Malayalam and 3rd for Tamil."
}
Markdown (Informal)
[SJ_AJ@DravidianLangTech-EACL2021: Task-Adaptive Pre-Training of Multilingual BERT models for Offensive Language Identification](https://aclanthology.org/2021.dravidianlangtech-1.44/) (Jayanthi & Gupta, DravidianLangTech 2021)
ACL
Sai Muralidhar Jayanthi and Akshat Gupta. 2021. [SJ_AJ@DravidianLangTech-EACL2021: Task-Adaptive Pre-Training of Multilingual BERT models for Offensive Language Identification](https://aclanthology.org/2021.dravidianlangtech-1.44/). In *Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages*, pages 307–312, Kyiv. Association for Computational Linguistics.
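
The core technique named in the abstract, task-adaptive pre-training (TAPT) of a multilingual BERT model with a masked language modeling objective, can be sketched with the Hugging Face `transformers` library. This is a minimal illustration, not the authors' released code: the model name, the corpus file `task_corpus.txt`, and all hyperparameters below are assumptions chosen for the example.

```python
# Minimal TAPT sketch: continue MLM pre-training of mBERT on unlabeled
# task-domain text before fine-tuning for classification.
# Paths and hyperparameters are illustrative, not the paper's configuration.
from datasets import load_dataset
from transformers import (
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

model_name = "bert-base-multilingual-cased"  # mBERT; "xlm-roberta-base" for XLM-R
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForMaskedLM.from_pretrained(model_name)

# Unlabeled task-domain text (hypothetical file): one comment per line.
dataset = load_dataset("text", data_files={"train": "task_corpus.txt"})

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, max_length=128)

tokenized = dataset.map(tokenize, batched=True, remove_columns=["text"])

# Dynamically mask 15% of tokens to form the MLM training objective.
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)

trainer = Trainer(
    model=model,
    args=TrainingArguments(
        output_dir="tapt-mbert",
        num_train_epochs=3,
        per_device_train_batch_size=16,
    ),
    train_dataset=tokenized["train"],
    data_collator=collator,
)
trainer.train()
# The adapted checkpoint in "tapt-mbert" would then be fine-tuned for
# offensive language identification, e.g. by loading it with
# AutoModelForSequenceClassification on the labeled task data.
```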