@inproceedings{he-etal-2025-pre,
title = "Pre-trained Models Perform the Best When Token Distributions Follow {Z}ipf{'}s Law",
author = "He, Yanjin and
Zeng, Qingkai and
Jiang, Meng",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1421/",
pages = "27997--28009",
ISBN = "979-8-89176-332-6",
abstract = "Tokenization is a fundamental step in natural language processing (NLP) and other sequence modeling domains, where the choice of vocabulary size significantly impacts model performance. Despite its importance, selecting an optimal vocabulary size remains underexplored, typically relying on heuristics or dataset-specific choices. In this work, we propose a principled method for determining the vocabulary size by analyzing token frequency distributions through Zipf{'}s law. We show that downstream task performance correlates with how closely token distributions follow power-law behavior, and that aligning with Zipfian scaling improves both model efficiency and effectiveness. Extensive experiments across NLP, genomics, and chemistry demonstrate that models consistently achieve peak performance when the token distribution closely adheres to Zipf{'}s law, establishing Zipfian alignment as a robust and generalizable criterion for vocabulary size selection. The code and data are available at: https://github.com/yanjinhe/Tokenizer"
}