@article{kojima-etal-2025-continual,
  title         = {Continual Pre-training on Character-level Noisy Texts Makes Decoder-based Language Models Robust Few-shot Learners},
  author        = {Kojima, Takeshi and
                   Matsuo, Yutaka and
                   Iwasawa, Yusuke},
  journal       = {Transactions of the Association for Computational Linguistics},
  volume        = {13},
  year          = {2025},
  address       = {Cambridge, MA},
  publisher     = {MIT Press},
  url           = {https://aclanthology.org/2025.tacl-1.38/},
  doi           = {10.1162/tacl.a.21},
  internal-note = {NOTE(review): DOI does not match the usual TACL pattern 10.1162/tacl_a_NNNNN -- verify against the publisher record before relying on it},
  pages         = {831--847},
  abstract      = {Recent decoder-based pre-trained language models (PLMs) generally use subword tokenizers. However, adding character-level perturbations drastically changes the delimitation of texts by the tokenizers, leading to the vulnerability of PLMs. This study proposes a method of continual pre-training to convert decoder-based PLMs with subword tokenizers into perturbation-robust few-shot in-context learners. Our method continually trains decoder-based PLMs to predict the next tokens conditioning on artificially created character-level noisy texts. Since decoder-based language models are auto-regressive, we skip noised words from the target optimization. In addition, to maintain the same word prediction performance under noisy text as clean text, our method employs word distribution matching between the original PLMs and training models. We conducted experiments on various subword-based PLMs, including GPT2, Pythia, Mistral, Gemma2, and Llama3, ranging from 1B to 8B parameters. The results demonstrate that our method consistently improves the performance of few-shot in-context learning on downstream tasks which contain actual typos or misspellings as well as artificial noise.},
}
Markdown (Informal)
[Continual Pre-training on Character-level Noisy Texts Makes Decoder-based Language Models Robust Few-shot Learners](https://preview.aclanthology.org/ingest-eacl/2025.tacl-1.38/) (Kojima et al., TACL 2025)
ACL