@inproceedings{amal-etal-2024-anlp,
title = "{ANLP} {RG} at {S}tance{E}val2024: Comparative Evaluation of Stance, Sentiment and Sarcasm Detection",
author = "Amal, Mezghani and
Boujelbane, Rahma and
Ellouze, Mariem",
editor = "Habash, Nizar and
Bouamor, Houda and
Eskander, Ramy and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Abdelali, Ahmed and
Touileb, Samia and
Hamed, Injy and
Onaizan, Yaser and
Alhafni, Bashar and
Antoun, Wissam and
Khalifa, Salam and
Haddad, Hatem and
Zitouni, Imed and
AlKhamissi, Badr and
Almatham, Rawan and
Mrini, Khalil",
booktitle = "Proceedings of the Second Arabic Natural Language Processing Conference",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.arabicnlp-1.90/",
doi = "10.18653/v1/2024.arabicnlp-1.90",
pages = "788--793",
    abstract = "As part of our study, we worked on three tasks: stance detection, sarcasm detection and sentiment analysis using fine-tuning techniques on BERT-based models. Fine-tuning parameters were carefully adjusted over multiple iterations to maximize model performance. The three tasks are essential in the field of natural language processing (NLP) and present unique challenges. Stance detection is a critical task aimed at identifying a writer's stances or viewpoints in relation to a topic. Sarcasm detection seeks to spot sarcastic expressions, while sentiment analysis determines the attitude expressed in a text. After numerous experiments, we identified Arabert-twitter as the model offering the best performance for all three tasks. In particular, it achieves a macro F-score of 78.08{\%} for stance detection, a macro F1-score of 59.51{\%} for sarcasm detection and a macro F1-score of 64.57{\%} for sentiment detection. Our source code is available at https://github.com/MezghaniAmal/Mawqif"
}