@inproceedings{qarqaz-etal-2021-r00,
title = "R00 at {NLP}4{IF}-2021 Fighting {COVID}-19 Infodemic with Transformers and More Transformers",
author = "Qarqaz, Ahmed and
Abujaber, Dia and
Abdullah, Malak",
editor = "Feldman, Anna and
Da San Martino, Giovanni and
Leberknight, Chris and
Nakov, Preslav",
booktitle = "Proceedings of the Fourth Workshop on NLP for Internet Freedom: Censorship, Disinformation, and Propaganda",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2021.nlp4if-1.15/",
doi = "10.18653/v1/2021.nlp4if-1.15",
pages = "104--109",
abstract = "This paper describes the winning model in the Arabic NLP4IF shared task for fighting the COVID-19 infodemic. The goal of the shared task is to check disinformation about COVID-19 in Arabic tweets. Our proposed model has been ranked 1st with an F1-Score of 0.780 and an Accuracy score of 0.762. A variety of transformer-based pre-trained language models have been experimented with through this study. The best-scored model is an ensemble of AraBERT-Base, Asafya-BERT, and ARBERT models. One of the study`s key findings is showing the effect the pre-processing can have on every model`s score. In addition to describing the winning model, the current study shows the error analysis."
}
[R00 at NLP4IF-2021: Fighting COVID-19 Infodemic with Transformers and More Transformers](https://aclanthology.org/2021.nlp4if-1.15/) (Qarqaz et al., NLP4IF 2021)