@inproceedings{wu-etal-2023-rethinking,
title = "Rethinking Masked Language Modeling for {C}hinese Spelling Correction",
author = "Wu, Hongqiu and
Zhang, Shaohua and
Zhang, Yuchen and
Zhao, Hai",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.acl-long.600/",
doi = "10.18653/v1/2023.acl-long.600",
pages = "10743--10756",
abstract = "In this paper, we study Chinese Spelling Correction (CSC) as a joint decision made by two separate models: a language model and an error model. Through empirical analysis, we find that fine-tuning BERT tends to over-fit the error model while under-fit the language model, resulting in poor generalization to out-of-distribution error patterns. Given that BERT is the backbone of most CSC models, this phenomenon has a significant negative impact. To address this issue, we are releasing a multi-domain benchmark LEMON, with higher quality and diversity than existing benchmarks, to allow a comprehensive assessment of the open domain generalization of CSC models. Then, we demonstrate that a very simple strategy {--} randomly masking 20{\%} non-error tokens from the input sequence during fine-tuning {--} is sufficient for learning a much better language model without sacrificing the error model. This technique can be applied to any model architecture and achieves new state-of-the-art results on SIGHAN, ECSpell, and LEMON."
}
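
A rough sketch of the fine-tuning trick described in the abstract (randomly masking 20% of the non-error input tokens) follows, for illustration only. This is not the authors' released implementation; names such as mask_token_id, error_positions, and special_positions are assumptions introduced here.

import random
from typing import List, Set, FrozenSet

def mask_non_error_tokens(
    input_ids: List[int],
    error_positions: Set[int],          # indices where the input char differs from the gold char (assumed known during fine-tuning)
    mask_token_id: int,                 # e.g. BERT's [MASK] token id
    mask_rate: float = 0.2,             # the 20% rate quoted in the abstract
    special_positions: FrozenSet[int] = frozenset(),  # e.g. [CLS]/[SEP] positions, left untouched
) -> List[int]:
    """Return a copy of input_ids with roughly mask_rate of the non-error tokens masked."""
    # Only non-error, non-special positions are candidates for masking,
    # so the error model still sees every spelling error in the input.
    candidates = [
        i for i in range(len(input_ids))
        if i not in error_positions and i not in special_positions
    ]
    n_mask = int(round(mask_rate * len(candidates)))
    masked = list(input_ids)
    for i in random.sample(candidates, n_mask):
        masked[i] = mask_token_id
    return masked
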