@inproceedings{hamdan-etal-2025-r,
    title     = "{R}-{BPE}: Improving {BPE}-Tokenizers with Token Reuse",
    author    = "Hamdan, Nancy and
      Rakan Al Mraikhat, Osama and
      Zaraket, Fadi A.",
    editor    = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
    month     = nov,
    year      = "2025",
    address   = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2025.emnlp-main.1169/",
    doi       = "10.18653/v1/2025.emnlp-main.1169",
    pages     = "22951--22959",
    isbn      = "979-8-89176-332-6",
    abstract  = "This paper presents R-BPE, a lightweight framework for adapting existing Byte-Pair Encoding (BPE) tokenizers to better support a specified target language. It reuses tokens from user-excluded languages and creates ID-based maps to resolve the new tokens of the chosen language. We evaluate R-BPE on Arabic as a target language. R-BPE reduced subword fertility by an average of 24.4{\%} across the LLaMA 3.1 8B, Command R 35B, and Qwen 3 8B models. Applied to LLaMA 3.1 8B in continued pretraining mode, R-BPE yields a 7.33{\%} reduction in training time. On the ArabicMMLU benchmark, the resulting model improved by 5.09 points on five in-domain topics and matched the original model{'}s overall performance. It also preserved performance on EnglishMMLU. R-BPE effectively leverages existing models' tokenizers, embedding layers, and performance to better support target languages without incurring model size changes. We release an R-BPE implementation that is compatible with HuggingFace interfaces and thereby readily applicable to a wide range of existing models at \url{https://acr.ps/1L9GPmL}."
}
Markdown (Informal)
[R-BPE: Improving BPE-Tokenizers with Token Reuse](https://aclanthology.org/2025.emnlp-main.1169/) (Hamdan et al., EMNLP 2025)
ACL
- Nancy Hamdan, Osama Rakan Al Mraikhat, and Fadi A. Zaraket. 2025. R-BPE: Improving BPE-Tokenizers with Token Reuse. In Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing, pages 22951–22959, Suzhou, China. Association for Computational Linguistics.