@inproceedings{das-etal-2025-zero,
    title = "Zero-Shot Grammar Competency Estimation Using Large Language Model Generated Pseudo Labels",
    author = "Das, Sourya Dipta and
      Kumar, Shubham and
      Yadav, Kuldeep",
    editor = "Inui, Kentaro and
      Sakti, Sakriani and
      Wang, Haofen and
      Wong, Derek F. and
      Bhattacharyya, Pushpak and
      Banerjee, Biplab and
      Ekbal, Asif and
      Chakraborty, Tanmoy and
      Singh, Dhirendra Pratap",
    booktitle = "Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
    month = dec,
    year = "2025",
    address = "Mumbai, India",
    publisher = "The Asian Federation of Natural Language Processing and The Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-long.169/",
    pages = "3167--3179",
    isbn = "979-8-89176-298-5",
    abstract = "Grammar competency estimation is essential for assessing linguistic proficiency in both written and spoken language; however, the spoken modality presents additional challenges due to its spontaneous, unstructured, and disfluent nature. Developing accurate grammar scoring models further requires extensive expert annotation, making large-scale data creation impractical. To address these limitations, we propose a zero-shot grammar competency estimation framework that leverages unlabeled data and Large Language Models (LLMs) without relying on manual labels. During training, we employ LLM-generated predictions on unlabeled data by using grammar competency rubric-based prompts. These predictions, treated as pseudo labels, are utilized to train a transformer-based model through a novel training framework designed to handle label noise effectively. We show that the choice of LLM for pseudo-label generation critically affects model performance and that the ratio of clean-to-noisy samples during training strongly influences stability and accuracy. Finally, a qualitative analysis of error intensity and score prediction confirms the robustness and interpretability of our approach. Experimental results demonstrate the efficacy of our approach in estimating grammar competency scores with high accuracy, paving the way for scalable, low-resource grammar assessment systems.",
    internal-note = "NOTE(review): url points at a temporary 'preview.aclanthology.org/ingest-...' ingestion URL; replace with the canonical aclanthology.org URL (and add a doi if assigned) once the volume is published -- TODO confirm"
}
Markdown (Informal)
[Zero-Shot Grammar Competency Estimation Using Large Language Model Generated Pseudo Labels](https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-long.169/) (Das et al., IJCNLP-AACL 2025)
ACL
- Sourya Dipta Das, Shubham Kumar, and Kuldeep Yadav. 2025. Zero-Shot Grammar Competency Estimation Using Large Language Model Generated Pseudo Labels. In Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics, pages 3167–3179, Mumbai, India. The Asian Federation of Natural Language Processing and The Association for Computational Linguistics.