@inproceedings{wegmann-etal-2025-tokenization,
title = "Tokenization is Sensitive to Language Variation",
author = "Wegmann, Anna and
Nguyen, Dong and
Jurgens, David",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/display_plenaries/2025.findings-acl.572/",
pages = "10958--10983",
ISBN = "979-8-89176-256-5",
abstract = "Variation in language is ubiquitous and often systematically linked to regional, social, and contextual factors. Tokenizers split texts into smaller units and might behave differently for less common linguistic forms. This might affect downstream LLM performance differently on two types of tasks: Tasks where the model should be robust to language variation (e.g., for semantic tasks like NLI, labels do not depend on whether a text uses British or American spelling) and tasks where the model should be sensitive to language variation (e.g., for form-based tasks like authorship verification, labels depend on whether a text uses British or American spelling). We pre-train BERT base models with the popular Byte-Pair Encoding algorithm to investigate how key tokenization design choices impact the performance of downstream models: the corpus used to train the tokenizer, the pre-tokenizer and the vocabulary size. We find that the best tokenizer varies on the two task types and that the pre-tokenizer has the biggest overall impact on performance. Further, we introduce a new approach to estimate tokenizer impact on downstream LLM performance, showing substantial improvement over metrics like R{\'e}nyi efficiency. We encourage more work on language variation and its relation to tokenizers and thus LLM performance."
}