% NOTE(review): url normalized from the temporary preview mirror
% (preview.aclanthology.org/fix-sig-urls/...) to the canonical ACL Anthology URL.
@inproceedings{zhu-etal-2024-model,
  title     = {The Model Arena for Cross-lingual Sentiment Analysis: A Comparative Study in the Era of Large Language Models},
  author    = {Zhu, Xiliang and
               Gardiner, Shayna and
               Rold{\'a}n, Tere and
               Rossouw, David},
  editor    = {De Clercq, Orph{\'e}e and
               Barriere, Valentin and
               Barnes, Jeremy and
               Klinger, Roman and
               Sedoc, Jo{\~a}o and
               Tafreshi, Shabnam},
  booktitle = {Proceedings of the 14th Workshop on Computational Approaches to Subjectivity, Sentiment, {\&} Social Media Analysis},
  month     = aug,
  year      = {2024},
  address   = {Bangkok, Thailand},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.wassa-1.12/},
  doi       = {10.18653/v1/2024.wassa-1.12},
  pages     = {141--152},
  abstract  = {Sentiment analysis serves as a pivotal component in Natural Language Processing (NLP). Advancements in multilingual pre-trained models such as XLM-R and mT5 have contributed to the increasing interest in cross-lingual sentiment analysis. The recent emergence in Large Language Models (LLM) has significantly advanced general NLP tasks, however, the capability of such LLMs in cross-lingual sentiment analysis has not been fully studied. This work undertakes an empirical analysis to compare the cross-lingual transfer capability of public Small Multilingual Language Models (SMLM) like XLM-R, against English-centric LLMs such as Llama-3, in the context of sentiment analysis across English, Spanish, French and Chinese. Our findings reveal that among public models, SMLMs exhibit superior zero-shot cross-lingual performance relative to LLMs. However, in few-shot cross-lingual settings, public LLMs demonstrate an enhanced adaptive potential. In addition, we observe that proprietary GPT-3.5 and GPT-4 lead in zero-shot cross-lingual capability, but are outpaced by public models in few-shot scenarios.},
}
Markdown (Informal)
[The Model Arena for Cross-lingual Sentiment Analysis: A Comparative Study in the Era of Large Language Models](https://aclanthology.org/2024.wassa-1.12/) (Zhu et al., WASSA 2024)
ACL