@inproceedings{labib-etal-2024-cuet,
title = "{CUET}{\_}sstm at {A}r{AIE}val Shared Task: Unimodal (Text) Propagandistic Technique Detection Using Transformer-Based Model",
author = "Labib, Momtazul and
Rahman, Samia and
Murad, Hasan and
Das, Udoy",
editor = "Habash, Nizar and
Bouamor, Houda and
Eskander, Ramy and
Tomeh, Nadi and
Abu Farha, Ibrahim and
Abdelali, Ahmed and
Touileb, Samia and
Hamed, Injy and
Onaizan, Yaser and
Alhafni, Bashar and
Antoun, Wissam and
Khalifa, Salam and
Haddad, Hatem and
Zitouni, Imed and
AlKhamissi, Badr and
Almatham, Rawan and
Mrini, Khalil",
booktitle = "Proceedings of The Second Arabic Natural Language Processing Conference",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2024.arabicnlp-1.52/",
doi = "10.18653/v1/2024.arabicnlp-1.52",
pages = "507--511",
abstract = "In recent days, propaganda has started to influence public opinion increasingly as social media usage continues to grow. Our research has been part of the first challenge, Unimodal (Text) Propagandistic Technique Detection of ArAIEval shared task at the ArabicNLP 2024 conference, co-located with ACL 2024, identifying specific Arabic text spans using twenty-three propaganda techniques. We have augmented underrepresented techniques in the provided dataset using synonym replacement and have evaluated various machine learning (RF, SVM, MNB), deep learning (BiLSTM), and transformer-based models (bert-base-arabic, Marefa-NER, AraBERT) with transfer learning. Our comparative study has shown that the transformer model {\textquotedblleft}bert-base-arabic{\textquotedblright} has outperformed other models. Evaluating the test set, it has achieved the micro-F1 score of 0.2995 which is the highest. This result has secured our team {\textquotedblleft}CUET{\_}sstm{\textquotedblright} first place among all participants in task 1 of the ArAIEval."
}