@inproceedings{park-etal-2025-instruction,
title = "Instruction-Driven In-Context Learning for Domain-Specific {C}hinese Spelling Correction",
author = "Park, Hyunsoo and
Wu, Hongqiu and
Zhao, Hai",
editor = "Sun, Maosong and
Duan, Peiyong and
Liu, Zhiyuan and
Xu, Ruifeng and
Sun, Weiwei",
booktitle = "Proceedings of the 24th {C}hina National Conference on Computational Linguistics ({CCL} 2025)",
month = aug,
year = "2025",
address = "Jinan, China",
publisher = "Chinese Information Processing Society of China",
url = "https://preview.aclanthology.org/ingest-ccl/2025.ccl-1.82/",
pages = "1095--1107",
    abstract = "This paper investigates domain adaptation in Chinese Spelling Correction (CSC) based on the instruction-following ability of large language models (LLMs). In the instructions, we include a variety of domain-specific requirements for spelling correction, such as the domain{'}s formality or writing tone, which go beyond the considerations of previous CSC research. To evaluate the LLMs' performance on instruction-following, we propose IDSpell, a semi-supervised construction pipeline for a CSC dataset containing a wide range of domain-specific sentences along with specific instructions. We construct a dataset with IDSpell and evaluate it on Qwen2.5 and GPT-4o, where we find that instructions have a meaningful influence on correction, increasing the average F1 score by 10.4{\%} compared to when the instructions are not provided. To further enhance the result, we propose Contrastive Prompting, a method incorporating contrastive false examples into the prompt to better guide the model to understand the instruction. Experiments demonstrate that our method outperforms baseline prompting with an average improvement of 5.4{\%}. Our dataset and code are publicly available for further research."
}