@inproceedings{sato-sasano-2026-language,
  title     = {How Do Language Models Acquire Character-Level Information?},
  author    = {Sato, Soma and
               Sasano, Ryohei},
  editor    = {Demberg, Vera and
               Inui, Kentaro and
               Marquez, Llu{\'\i}s},
  booktitle = {Proceedings of the 19th Conference of the {European} Chapter of the {Association} for {Computational} {Linguistics} (Volume 1: Long Papers)},
  month     = mar,
  year      = {2026},
  address   = {Rabat, Morocco},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2026.eacl-long.282/},
  pages     = {5987--5997},
  isbn      = {979-8-89176-380-7},
  abstract  = {Language models (LMs) have been reported to implicitly encode character-level information, despite not being explicitly provided during training. However, the mechanisms underlying this phenomenon remain largely unexplored. To reveal the mechanisms, we analyze how models acquire character-level knowledge by comparing LMs trained under controlled settings, such as specifying the pre-training dataset or tokenizer, with those trained under standard settings. We categorize the contributing factors into those independent of tokenization. Our analysis reveals that merge rules and orthographic constraints constitute primary factors arising from tokenization, whereas semantic associations of substrings and syntactic information function as key factors independent of tokenization.},
}
Markdown (Informal)
[How Do Language Models Acquire Character-Level Information?](https://aclanthology.org/2026.eacl-long.282/) (Sato & Sasano, EACL 2026)
ACL
- Soma Sato and Ryohei Sasano. 2026. How Do Language Models Acquire Character-Level Information? In Proceedings of the 19th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5987–5997, Rabat, Morocco. Association for Computational Linguistics.