@inproceedings{hiraoka-inui-2025-spelling,
title = "Spelling-out is not Straightforward: {LLM}s' Capability of Tokenization from Token to Characters",
author = "Hiraoka, Tatsuya and
Inui, Kentaro",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-emnlp.719/",
doi = "10.18653/v1/2025.findings-emnlp.719",
pages = "13340--13353",
ISBN = "979-8-89176-335-7",
abstract = "Large language models (LLMs) can spell out tokens character by character with high accuracy, yet they struggle with more complex character-level tasks, such as identifying compositional subcomponents within tokens. In this work, we investigate how LLMs internally represent and utilize character-level information during the spelling-out process. Our analysis reveals that, although spelling out is a simple task for humans, it is not handled in a straightforward manner by LLMs. Specifically, we show that the embedding layer does not fully encode character-level information, particularly beyond the first character. As a result, LLMs rely on intermediate and higher Transformer layers to reconstruct character-level knowledge, where we observe a distinct ``breakthrough'' in their spelling behavior. We validate this mechanism through three complementary analyses: probing classifiers, identification of knowledge neurons, and inspection of attention weights."
}