@inproceedings{oumer-etal-2023-itri,
title = "Itri Amigos at {A}r{AIE}val Shared Task: Transformer vs. Compression-Based Models for Persuasion Techniques and Disinformation Detection",
author = "Oumer, Jehad and
Ahmed, Nouman and
Flechas Manrique, Natalia",
editor = "Sawaf, Hassan and
El-Beltagy, Samhaa and
Zaghouani, Wajdi and
Magdy, Walid and
Abdelali, Ahmed and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Habash, Nizar and
Khalifa, Salam and
Keleg, Amr and
Haddad, Hatem and
Zitouni, Imed and
Mrini, Khalil and
Almatham, Rawan",
booktitle = "Proceedings of ArabicNLP 2023",
month = dec,
year = "2023",
address = "Singapore (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.arabicnlp-1.53/",
doi = "10.18653/v1/2023.arabicnlp-1.53",
pages = "543--548",
abstract = "Social media has significantly amplified the dissemination of misinformation. Researchers have employed natural language processing and machine learning techniques to identify and categorize false information on these platforms. While there is a well-established body of research on detecting fake news in English and Latin languages, the study of Arabic fake news detection remains limited. This paper describes the methods used to tackle the challenges of the ArAIEval shared Task 2023. We conducted experiments with both monolingual Arabic and multi-lingual pre-trained Language Models (LM). We found that the monolingual Arabic models outperformed in all four subtasks. Additionally, we explored a novel lossless compression method, which, while not surpassing pretrained LM performance, presents an intriguing avenue for future experimentation to achieve comparable results in a more efficient and rapid manner."
}
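
The abstract does not spell out the compression-based classifier, so the following is only a minimal sketch of the general parameter-free recipe such methods typically follow: a standard lossless compressor combined with normalized compression distance (NCD) and a k-nearest-neighbour vote. The choice of gzip, the `ncd`/`classify` helpers, and the toy Arabic training pairs are all illustrative assumptions, not the authors' code.

```python
import gzip


def ncd(x: bytes, y: bytes) -> float:
    """Normalized Compression Distance between two byte strings."""
    cx = len(gzip.compress(x))
    cy = len(gzip.compress(y))
    cxy = len(gzip.compress(x + b" " + y))
    return (cxy - min(cx, cy)) / max(cx, cy)


def classify(test_text: str, train: list[tuple[str, str]], k: int = 1) -> str:
    """Label a document by a k-nearest-neighbour vote under NCD."""
    t = test_text.encode("utf-8")
    dists = sorted((ncd(t, doc.encode("utf-8")), label) for doc, label in train)
    top = [label for _, label in dists[:k]]
    return max(set(top), key=top.count)


# Hypothetical toy examples; the shared task works on Arabic tweets and news.
train = [
    ("هذا خبر صحيح عن الاقتصاد", "not-disinfo"),
    ("عاجل: خبر كاذب ومضلل تماما", "disinfo"),
]
print(classify("خبر مضلل وكاذب", train, k=1))
```

The appeal the abstract points to is efficiency: this recipe needs no training or GPU, only compressed lengths, although the paper reports it did not surpass the pre-trained LMs.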