@inproceedings{liang-etal-2025-improving-proficiency,
title = "Improving Proficiency and Grammar Accuracy for {C}hinese Language Learners with Large Language Models",
author = "Liang, Yuqi and
Xu, Wenjing and
Xu, Hongzhi",
editor = "Inui, Kentaro and
Sakti, Sakriani and
Wang, Haofen and
Wong, Derek F. and
Bhattacharyya, Pushpak and
Banerjee, Biplab and
Ekbal, Asif and
Chakraborty, Tanmoy and
Singh, Dhirendra Pratap",
booktitle = "Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "The Asian Federation of Natural Language Processing and The Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.findings-ijcnlp.74/",
pages = "1216--1232",
ISBN = "979-8-89176-303-6",
abstract = "In this study, we evaluate the performance of large language models (LLMs) in detecting and correcting grammatical errors made by Chinese language learners. We find that incorporating various linguistic features{---}such as dependency structures, parts of speech, and pinyin transliteration{---}into the prompts can potentially enhance model performance. Among these features, parts of speech and pinyin prove to be the most effective across all tested models. Additionally, our findings show that the success of error correction also depends on the severity of the errors. When the intended meaning is preserved, LLMs tend to provide accurate revisions following the principle of minimal editing. However, when the meaning is obscured, LLMs are more likely to produce divergent outputs, both in comparison to reference corrections and to the responses of other models."
}