@inproceedings{feher-etal-2025-retrofitting,
title = "Retrofitting Large Language Models with Dynamic Tokenization",
author = "Feher, Darius and
Vuli{\'c}, Ivan and
Minixhofer, Benjamin",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.1444/",
pages = "29866--29883",
ISBN = "979-8-89176-251-0",
abstract = "Current language models (LMs) use a fixed, static subword tokenizer. This default choice typically results in degraded efficiency and language capabilities, especially in languages other than English. To address this issue, we challenge the static design and propose retrofitting LMs with dynamic tokenization: a way to dynamically decide on token boundaries based on the input text via a subword-merging algorithm inspired by byte-pair encoding. We merge frequent subword sequences in a batch, then apply a pre-trained embedding-prediction hypernetwork to compute the token embeddings on-the-fly. For encoder-style models (e.g., XLM-R), this on average reduces token sequence lengths by {\ensuremath{>}}20{\%} across 14 languages while degrading performance by less than 2{\%}. The same method applied to pre-filling and scoring in decoder-style models (e.g., Mistral-7B) results in minimal performance degradation at up to 17{\%} reduction in sequence length. Overall, we find that dynamic tokenization can mitigate the limitations of static tokenization by substantially improving inference speed and promoting fairness across languages, enabling more equitable and adaptable LMs."
}
Markdown (Informal)
[Retrofitting Large Language Models with Dynamic Tokenization](https://aclanthology.org/2025.acl-long.1444/) (Feher et al., ACL 2025)

ACL
Darius Feher, Ivan Vulić, and Benjamin Minixhofer. 2025. Retrofitting Large Language Models with Dynamic Tokenization. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 29866–29883, Vienna, Austria. Association for Computational Linguistics.
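
The abstract above describes the core mechanism: merge frequent subword sequences within a batch (BPE-style), then let a pre-trained hypernetwork predict embeddings for the merged tokens on the fly. As a rough illustration only, here is a minimal Python sketch of the batch-level merging step; the function name `dynamic_merge`, the `min_freq` cutoff, and the greedy one-merge-per-step loop are assumptions for exposition, not the paper's implementation, and the embedding-prediction hypernetwork is omitted.

```python
from collections import Counter

def dynamic_merge(batch, num_merges=10, min_freq=2):
    """Greedily merge the most frequent adjacent subword pair across the
    whole batch, repeating for up to `num_merges` steps (BPE-style).

    `batch` is a list of token-string lists from a static subword
    tokenizer. Tokens are treated as plain strings for simplicity;
    real tokenizers also carry continuation markers (e.g. '##' or '▁').
    This is an illustrative sketch, not the authors' code.
    """
    for _ in range(num_merges):
        # Count adjacent subword pairs over every sequence in the batch.
        pairs = Counter()
        for seq in batch:
            pairs.update(zip(seq, seq[1:]))
        if not pairs:
            break
        (a, b), freq = pairs.most_common(1)[0]
        if freq < min_freq:
            break  # nothing frequent enough left to merge
        # Apply the chosen merge to every sequence in the batch.
        new_batch = []
        for seq in batch:
            out, i = [], 0
            while i < len(seq):
                if i + 1 < len(seq) and (seq[i], seq[i + 1]) == (a, b):
                    out.append(a + b)  # embedding for this new token would
                    i += 2             # come from the hypernetwork (omitted)
                else:
                    out.append(seq[i])
                    i += 1
            new_batch.append(out)
        batch = new_batch
    return batch

if __name__ == "__main__":
    batch = [["un", "believ", "able"],
             ["un", "believ", "able"],
             ["un", "believ", "ably"]]
    # Two merge steps fuse 'un'+'believ', then 'unbeliev'+'able',
    # shortening the token sequences as the abstract describes.
    print(dynamic_merge(batch, num_merges=2, min_freq=2))
```

Counting pairs over the whole batch, rather than per sequence, is what makes the merges input-adaptive: token boundaries change with the batch content instead of being fixed by a static vocabulary.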