@inproceedings{li-etal-2025-tokalign,
title = "{T}ok{A}lign: Efficient Vocabulary Adaptation via Token Alignment",
author = "Li, Chong and
Zhang, Jiajun and
Zong, Chengqing",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.207/",
pages = "4109--4126",
ISBN = "979-8-89176-251-0",
abstract = "Tokenization serves as a foundational step for Large Language Models (LLMs) to process text. In new domains or languages, the inefficiency of the tokenizer will slow down the training and generation of LLM. The mismatch in vocabulary also hinders deep knowledge transfer between LLMs like token-level distillation. To mitigate this gap, we propose an efficient method named **TokAlign** to replace the vocabulary of LLM from the token co-occurrences view, and further transfer the token-level knowledge between models. It first aligns the source vocabulary to the target one by learning a one-to-one mapping matrix for token IDs. Model parameters, including embeddings, are rearranged and progressively fine-tuned for the new vocabulary. Our method significantly improves multilingual text compression rates and vocabulary initialization for LLMs, decreasing the perplexity from ${3.4e}^{2}$ of strong baseline methods to ${1.2e}^{2}$ after initialization. Experimental results on models across multiple parameter scales demonstrate the effectiveness and generalization of TokAlign, which costs as few as 5k steps to restore the performance of the vanilla model. After unifying vocabularies between LLMs, token-level distillation can remarkably boost (+4.4{\%} than sentence-level distillation) the base model, costing only 235M tokens."
}
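The abstract describes initializing the new vocabulary by rearranging the source model's embedding rows through a learned one-to-one token-ID mapping. The snippet below is a minimal illustrative sketch of that rearrangement step only, not the authors' implementation; the mapping array `target_to_source` is a hypothetical name for the alignment output, and how it is learned from token co-occurrences is not shown here.

```python
# Illustrative sketch (not the authors' code): reorder source-model embedding
# rows according to a one-to-one target-to-source token-ID mapping, yielding an
# initialization of the embedding table for the new vocabulary.
import numpy as np

def remap_embeddings(src_embeddings: np.ndarray, target_to_source: np.ndarray) -> np.ndarray:
    """Build an embedding table for the target vocabulary.

    src_embeddings: (source_vocab_size, hidden_dim) embedding matrix.
    target_to_source: (target_vocab_size,) integer array; entry i is the source
        token ID aligned to target token ID i (hypothetical mapping format).
    """
    return src_embeddings[target_to_source]

# Toy usage: a 5-token source vocabulary remapped onto a 4-token target vocabulary.
src = np.random.randn(5, 8)
mapping = np.array([2, 0, 4, 1])      # each target ID points at one source ID
tgt = remap_embeddings(src, mapping)  # shape (4, 8); rows copied from src
assert tgt.shape == (4, 8)
```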