@inproceedings{chang-zhu-2025-chunk,
title = "A Chunk-based Chain of Thought Prompting Method for Mitigating Over-Correction in {C}hinese Grammatical Error Correction",
author = "Chang, Xinquan and
Zhu, Junguo",
editor = "Sun, Maosong and
Duan, Peiyong and
Liu, Zhiyuan and
Xu, Ruifeng and
Sun, Weiwei",
booktitle = "Proceedings of the 24th {C}hina National Conference on Computational Linguistics ({CCL} 2025)",
month = aug,
year = "2025",
address = "Jinan, China",
publisher = "Chinese Information Processing Society of China",
url = "https://preview.aclanthology.org/ingest-ccl/2025.ccl-1.63/",
pages = "831--841",
abstract = "Large Language Models (LLMs) have demonstrated remarkable capabilities in semantic understanding and text generation. However, when applied to downstream tasks such as Chinese Grammatical Error Correction (CGEC), they often suffer from over-correction issues, where grammatically correct parts are mistakenly altered. Moreover, some existing methods aim to address over-correction in Sequence-to-Sequence (Seq2Seq) models, they are difficult to adapt to decoder-only LLMs. To address these challenges, we propose a Chunk-based Chain of Thought (CoT) Prompting Method. Our study is structured into three key components. Initially, we identify specific types of grammatical errors in the input sentences. Following this, sentences are segmented into smaller chunks, and each chunk is analyzed to match the detected error types. Ultimately, the aggregated information guides LLMs in performing localized correction within the input sentences. The experimental results have proved the effectiveness of our method in mitigating over-correction, achieving higher F0.5 score while maintaining robust grammatical error correction performance. This method provides innovative perspectives on employing LLMs to enhance the precision and granularity of CGEC task."
}
@comment{
Markdown (Informal)
[A Chunk-based Chain of Thought Prompting Method for Mitigating Over-Correction in Chinese Grammatical Error Correction](https://preview.aclanthology.org/ingest-ccl/2025.ccl-1.63/) (Chang & Zhu, CCL 2025)
ACL
}