@inproceedings{nasr-ben-hajhmida-2024-senit,
  title     = {{SENIT} at {AraFinNLP2024}: Trust Your Model or Combine Two},
  author    = {Nasr, Abdelmomen and
               Ben HajHmida, Moez},
  editor    = {Habash, Nizar and
               Bouamor, Houda and
               Eskander, Ramy and
               Tomeh, Nadi and
               Abu Farha, Ibrahim and
               Abdelali, Ahmed and
               Touileb, Samia and
               Hamed, Injy and
               Onaizan, Yaser and
               Alhafni, Bashar and
               Antoun, Wissam and
               Khalifa, Salam and
               Haddad, Hatem and
               Zitouni, Imed and
               AlKhamissi, Badr and
               Almatham, Rawan and
               Mrini, Khalil},
  booktitle = {Proceedings of the Second Arabic Natural Language Processing Conference},
  month     = aug,
  year      = {2024},
  address   = {Bangkok, Thailand},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.arabicnlp-1.39/},
  doi       = {10.18653/v1/2024.arabicnlp-1.39},
  pages     = {428--432},
  abstract  = {We describe our submitted system to the 2024 Shared Task on The Arabic Financial NLP (Malaysha et al., 2024). We tackled Subtask 1, namely Multi-dialect Intent Detection. We used state-of-the-art pretrained contextualized text representation models and fine-tuned them according to the downstream task at hand. We started by finetuning multilingual BERT and various Arabic variants, namely MARBERTV1, MARBERTV2, and CAMeLBERT. Then, we employed an ensembling technique to improve our classification performance combining MARBERTV2 and CAMeLBERT embeddings. The findings indicate that MARBERTV2 surpassed all the other models mentioned.},
  internal-note = {URL normalized from preview.aclanthology.org mirror to canonical aclanthology.org; title stored in Title Case with whole-word brace protection},
}
Markdown (Informal)
[SENIT at AraFinNLP2024: trust your model or combine two](https://aclanthology.org/2024.arabicnlp-1.39/) (Nasr & Ben HajHmida, ArabicNLP 2024)
ACL