@inproceedings{orabe-etal-2020-dothemath,
  title     = {{DoTheMath} at {SemEval}-2020 Task 12 : Deep Neural Networks with Self Attention for {Arabic} Offensive Language Detection},
  author    = {Orabe, Zoher and
               Haddad, Bushr and
               Ghneim, Nada and
               Al-Abood, Anas},
  editor    = {Herbelot, Aurelie and
               Zhu, Xiaodan and
               Palmer, Alexis and
               Schneider, Nathan and
               May, Jonathan and
               Shutova, Ekaterina},
  booktitle = {Proceedings of the Fourteenth Workshop on Semantic Evaluation},
  month     = dec,
  year      = {2020},
  address   = {Barcelona (online)},
  publisher = {International Committee for Computational Linguistics},
  url       = {https://aclanthology.org/2020.semeval-1.254/},
  doi       = {10.18653/v1/2020.semeval-1.254},
  pages     = {1932--1937},
  abstract  = {This paper describes our team work and submission for the SemEval 2020 (Sub-Task A) {\textquotedblleft}Offensive Eval: Identifying and Categorizing Offensive Arabic Language in Arabic Social Media{\textquotedblright}. Our two baseline models were based on different levels of representation: character vs. word level. In word level based representation we implemented a convolutional neural network model and a bi-directional GRU model. In character level based representation we implemented a hyper CNN and LSTM model. All of these models have been further augmented with attention layers for a better performance on our task. We also experimented with three types of static word embeddings: word2vec, FastText, and Glove, in addition to emoji embeddings, and compared the performance of the different deep learning models on the dataset provided by this task. The bi-directional GRU model with attention has achieved the highest score (0.85{\%} F1 score) among all other models.},
}
Markdown (Informal)
[DoTheMath at SemEval-2020 Task 12 : Deep Neural Networks with Self Attention for Arabic Offensive Language Detection](https://aclanthology.org/2020.semeval-1.254/) (Orabe et al., SemEval 2020)
ACL