@inproceedings{azizov-etal-2023-frank,
title = "Frank at {A}r{AIE}val Shared Task: {A}rabic Persuasion and Disinformation: The Power of Pretrained Models",
author = "Azizov, Dilshod and
Li, Jiyong and
Liang, Shangsong",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.arabicnlp-1.59/",
doi = "10.18653/v1/2023.arabicnlp-1.59",
pages = "583--588",
    abstract = {In this work, we present our systems developed for the ``ArAIEval'' shared task of ArabicNLP 2023 (CITATION). We used an mBERT transformer for Subtask 1A, which targets persuasion in Arabic tweets, and we used the MARBERT transformer for Subtask 2A to identify disinformation in Arabic tweets. Our persuasion detection system achieved a micro-F1 of \textbf{0.745}, surpassing the baseline by 13.2{\%}, and registered a macro-F1 of 0.717 based on leaderboard scores. Similarly, our disinformation system recorded a micro-F1 of \textbf{0.816}, besting the na{\"i}ve majority baseline by 6.7{\%}, with a macro-F1 of 0.637. Furthermore, we present our preliminary results on a variety of pre-trained models. In terms of overall ranking, our systems placed $7^\text{th}$ out of 16 and $12^\text{th}$ out of 17 teams for Subtasks 1A and 2A, respectively.}
}