@inproceedings{hannani-etal-2024-assessing,
title = "Assessing the Performance of {C}hat{GPT}-4, Fine-tuned {BERT} and Traditional {ML} Models on {M}oroccan {A}rabic Sentiment Analysis",
author = "Hannani, Mohamed and
Soudi, Abdelhadi and
Van Laerhoven, Kristof",
editor = {H{\"a}m{\"a}l{\"a}inen, Mika and
{\"O}hman, Emily and
Miyagawa, So and
Alnajjar, Khalid and
Bizzoni, Yuri},
booktitle = "Proceedings of the 4th International Conference on Natural Language Processing for Digital Humanities",
month = nov,
year = "2024",
address = "Miami, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.nlp4dh-1.47/",
doi = "10.18653/v1/2024.nlp4dh-1.47",
pages = "489--498",
abstract = "Large Language Models (LLMs) have demonstrated impressive capabilities in various natural language processing tasks across different languages. However, their performance in low-resource languages and dialects, such as Moroccan Arabic (MA), requires further investigation. This study evaluates the performance of ChatGPT-4, different fine-tuned BERT models, FastText as text representation, and traditional machine learning models on MA sentiment analysis. Experiments were done on two open source MA datasets: an X(Twitter) Moroccan Arabic corpus (MAC) and a Moroccan Arabic YouTube corpus (MYC) datasets to assess their capabilities on sentiment text classification. We compare the performance of fully fine-tuned and pre-trained Arabic BERT-based models with ChatGPT-4 in zero-shot settings."
}