@inproceedings{mubarak-etal-2026-nahw,
  title     = {{Nahw}: A Comprehensive Benchmark of {Arabic} Grammar Understanding, Error Detection, Correction, and Explanation},
  author    = {Mubarak, Hamdy and
               Hawasly, Majd and
               Mohamed, Abubakr},
  editor    = {Demberg, Vera and
               Inui, Kentaro and
               Marquez, Llu{\'i}s},
  booktitle = {Proceedings of the 19th Conference of the {European} Chapter of the {Association for Computational Linguistics} (Volume 1: Long Papers)},
  month     = mar,
  year      = {2026},
  address   = {Rabat, Morocco},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2026.eacl-long.296/},
  pages     = {6310--6328},
  isbn      = {979-8-89176-380-7},
  abstract  = {Grammar comprehension is a critical capability for large language models (LLMs) to achieve fluency in a target language. In low-resource settings, such as the case with Arabic, limited availability of high-quality data can lead to significant gaps in grammatical understanding, making systematic evaluation essential. We introduce Nahw, a comprehensive benchmark for Arabic grammar that covers both theoretical knowledge and practical applications, including grammatical error detection, correction, and explanation. We evaluate a range of LLMs on these tasks and find that many models still exhibit substantial deficiencies in Arabic grammar comprehension, with GPT-4o achieving a score of 67{\%} on average over all tasks, while the best performing Arabic model in our experiment (ALLaM-7B) achieving 42{\%}. Our experiments also demonstrate that while fine-tuning with synthetic data can improve performance, it does not match the effectiveness of training on natural, high-quality data.},
}
@comment{Markdown (informal) citation:
[Nahw: A Comprehensive Benchmark of Arabic Grammar Understanding, Error Detection, Correction, and Explanation](https://aclanthology.org/2026.eacl-long.296/) (Mubarak et al., EACL 2026), ACL.}