@inproceedings{jaber-martinez-2023-ptuk,
  title     = {{PTUK}-{HULAT} at {A}r{AIE}val Shared Task Fine-tuned {Distilbert} to Predict Disinformative Tweets},
  author    = {Jaber, Areej and
               Martinez, Paloma},
  editor    = {Sawaf, Hassan and
               El-Beltagy, Samhaa and
               Zaghouani, Wajdi and
               Magdy, Walid and
               Abdelali, Ahmed and
               Tomeh, Nadi and
               Abu Farha, Ibrahim and
               Habash, Nizar and
               Khalifa, Salam and
               Keleg, Amr and
               Haddad, Hatem and
               Zitouni, Imed and
               Mrini, Khalil and
               Almatham, Rawan},
  booktitle = {Proceedings of {ArabicNLP} 2023},
  month     = dec,
  year      = {2023},
  address   = {Singapore (Hybrid)},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.arabicnlp-1.50/},
  doi       = {10.18653/v1/2023.arabicnlp-1.50},
  pages     = {525--529},
  abstract  = {Disinformation involves the dissemination of incomplete, inaccurate, or misleading information; it has the objective, goal, or purpose of deliberately or intentionally lying to others about the truth. The spread of disinformative information on social media has serious implications, and it causes concern among internet users in different aspects. Automatic classification models are required to detect disinformative posts on social media, especially on Twitter. In this article, DistilBERT multilingual model was fine-tuned to classify tweets either as dis-informative or not dis-informative in Subtask 2A of the ArAIEval shared task. The system outperformed the baseline and achieved F1 micro 87{\%} and F1 macro 80{\%}. Our system ranked 11 compared with all participants.},
}
Markdown (Informal)
[PTUK-HULAT at ArAIEval Shared Task Fine-tuned Distilbert to Predict Disinformative Tweets](https://aclanthology.org/2023.arabicnlp-1.50/) (Jaber & Martinez, ArabicNLP 2023)
ACL