@inproceedings{aftab-malik-2022-erock,
title = "e{R}ock at Qur{'}an {QA} 2022: Contemporary Deep Neural Networks for Qur{'}an based Reading Comprehension Question Answers",
author = "Aftab, Esha and
Malik, Muhammad Kamran",
editor = "Al-Khalifa, Hend and
Elsayed, Tamer and
Mubarak, Hamdy and
Al-Thubaity, Abdulmohsen and
Magdy, Walid and
Darwish, Kareem",
booktitle = "Proceedinsg of the 5th Workshop on Open-Source Arabic Corpora and Processing Tools with Shared Tasks on Qur'an QA and Fine-Grained Hate Speech Detection",
month = jun,
year = "2022",
address = "Marseille, France",
publisher = "European Language Resources Association",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.osact-1.11/",
pages = "96--103",
abstract = "Question Answering (QA) has enticed the interest of NLP community in recent years. NLP enthusiasts are engineering new Models and fine-tuning the existing ones that can give out answers for the posed questions. The deep neural network models are found to perform exceptionally on QA tasks, but these models are also data intensive. For instance, BERT has outperformed many of its contemporary contenders on SQuAD dataset. In this work, we attempt at solving the closed domain reading comprehension Question Answering task on QRCD (Qur{'}anic Reading Comprehension Dataset) to extract an answer span from the provided passage, using BERT as a baseline model. We improved the model{'}s output by applying regularization techniques like weight-decay and data augmentation. Using different strategies we had 0.59{\%} and 0.31{\%} partial Reciprocal Ranking (pRR) on development and testing data splits respectively."
}
Markdown (Informal)
[eRock at Qur’an QA 2022: Contemporary Deep Neural Networks for Qur’an based Reading Comprehension Question Answers](https://preview.aclanthology.org/fix-sig-urls/2022.osact-1.11/) (Aftab & Malik, OSACT 2022)