@inproceedings{bhange-kasliwal-2020-hinglishnlp,
title = "{H}inglish{NLP} at {S}em{E}val-2020 Task 9: Fine-tuned Language Models for {H}inglish Sentiment Detection",
author = "Bhange, Meghana and
Kasliwal, Nirant",
editor = "Herbelot, Aurelie and
Zhu, Xiaodan and
Palmer, Alexis and
Schneider, Nathan and
May, Jonathan and
Shutova, Ekaterina",
booktitle = "Proceedings of the Fourteenth Workshop on Semantic Evaluation",
month = dec,
year = "2020",
address = "Barcelona (online)",
publisher = "International Committee for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2020.semeval-1.119/",
doi = "10.18653/v1/2020.semeval-1.119",
pages = "934--939",
abstract = "Sentiment analysis for code-mixed social media text continues to be an under-explored area. This work adds two common approaches: fine-tuning large transformer models and sample efficient methods like ULMFiT. Prior work demonstrates the efficacy of classical ML methods for polarity detection. Fine-tuned general-purpose language representation models, such as those of the BERT family are benchmarked along with classical machine learning and ensemble methods. We show that NB-SVM beats RoBERTa by 6.2{\%} (relative) F1. The best performing model is a majority-vote ensemble which achieves an F1 of 0.707. The leaderboard submission was made under the codalab username nirantk, with F1 of 0.689."
}
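The abstract's best-performing system is a majority-vote ensemble over the individual classifiers. As a minimal illustrative sketch of hard majority voting (not the authors' implementation; the model names and predictions below are placeholders, assuming per-model label lists of equal length):

```python
from collections import Counter

def majority_vote(predictions):
    """Return the most frequent label per example across model predictions."""
    # predictions: list of per-model label lists, all the same length
    return [Counter(labels).most_common(1)[0][0] for labels in zip(*predictions)]

# Hypothetical per-model predictions for four Hinglish examples
nbsvm_preds   = ["positive", "negative", "neutral", "positive"]
ulmfit_preds  = ["positive", "neutral",  "neutral", "negative"]
roberta_preds = ["negative", "negative", "neutral", "positive"]

print(majority_vote([nbsvm_preds, ulmfit_preds, roberta_preds]))
# ['positive', 'negative', 'neutral', 'positive']
```

Ties are resolved by whichever label Counter encounters first, which is sufficient for a sketch; the paper does not specify its tie-breaking rule.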
Markdown (Informal)
[HinglishNLP at SemEval-2020 Task 9: Fine-tuned Language Models for Hinglish Sentiment Detection](https://preview.aclanthology.org/landing_page/2020.semeval-1.119/) (Bhange & Kasliwal, SemEval 2020)