@inproceedings{srinivasan-2020-msr,
    title     = {{MSR} {I}ndia at {S}em{E}val-2020 Task 9: Multilingual Models Can Do Code-Mixing Too},
    author    = {Srinivasan, Anirudh},
    editor    = {Herbelot, Aurelie and
                 Zhu, Xiaodan and
                 Palmer, Alexis and
                 Schneider, Nathan and
                 May, Jonathan and
                 Shutova, Ekaterina},
    booktitle = {Proceedings of the Fourteenth Workshop on Semantic Evaluation},
    month     = dec,
    year      = {2020},
    address   = {Barcelona (online)},
    publisher = {International Committee for Computational Linguistics},
    url       = {https://aclanthology.org/2020.semeval-1.122/},
    doi       = {10.18653/v1/2020.semeval-1.122},
    pages     = {951--956},
    abstract  = {In this paper, we present our system for the SemEval 2020 task on code-mixed sentiment analysis. Our system makes use of large transformer based multilingual embeddings like mBERT. Recent work has shown that these models posses the ability to solve code-mixed tasks in addition to their originally demonstrated cross-lingual abilities. We evaluate the stock versions of these models for the sentiment analysis task and also show that their performance can be improved by using unlabelled code-mixed data. Our submission (username Genius1237) achieved the second rank on the English-Hindi subtask with an F1 score of 0.726.},
}
Markdown (Informal)
[MSR India at SemEval-2020 Task 9: Multilingual Models Can Do Code-Mixing Too](https://aclanthology.org/2020.semeval-1.122/) (Srinivasan, SemEval 2020)
ACL