@inproceedings{sultan-etal-2020-wessa,
    title     = {{WESSA} at {S}em{E}val-2020 Task 9: Code-Mixed Sentiment Analysis Using Transformers},
    author    = {Sultan, Ahmed and
                 Salim, Mahmoud and
                 Gaber, Amina and
                 El Hosary, Islam},
    editor    = {Herbelot, Aurelie and
                 Zhu, Xiaodan and
                 Palmer, Alexis and
                 Schneider, Nathan and
                 May, Jonathan and
                 Shutova, Ekaterina},
    booktitle = {Proceedings of the Fourteenth Workshop on Semantic Evaluation},
    month     = dec,
    year      = {2020},
    address   = {Barcelona (online)},
    publisher = {International Committee for Computational Linguistics},
    url       = {https://aclanthology.org/2020.semeval-1.181/},
    doi       = {10.18653/v1/2020.semeval-1.181},
    pages     = {1342--1347},
    abstract  = {In this paper, we describe our system submitted for SemEval 2020 Task 9, Sentiment Analysis for Code-Mixed Social Media Text alongside other experiments. Our best performing system is a Transfer Learning-based model that fine-tunes XLM-RoBERTa, a transformer-based multilingual masked language model, on monolingual English and Spanish data and Spanish-English code-mixed data. Our system outperforms the official task baseline by achieving a 70.1{\%} average F1-Score on the official leaderboard using the test set. For later submissions, our system manages to achieve a 75.9{\%} average F1-Score on the test set using CodaLab username {\textquotedblleft}ahmed0sultan{\textquotedblright}.},
}
Markdown (Informal)
[WESSA at SemEval-2020 Task 9: Code-Mixed Sentiment Analysis Using Transformers](https://aclanthology.org/2020.semeval-1.181/) (Sultan et al., SemEval 2020)
ACL