@inproceedings{andryushchenko-ivanov-2025-evaluating,
title = "Evaluating Tokenizer Adaptation Methods for Large Language Models on Low-Resource Programming Languages",
author = "Andryushchenko, Georgy and
Ivanov, Vladimir V.",
editor = "Zhao, Jin and
Wang, Mingyang and
Liu, Zhu",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 4: Student Research Workshop)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.acl-srw.57/",
pages = "823--833",
ISBN = "979-8-89176-254-1",
abstract = "Large language models (LLMs), which are primarily trained on high-resource programming languages (HRPLs), tend to perform sub-optimally for low-resource programming languages (LRPLs). This study investigates the impact of tokenizer adaptation methods on improving code generation for LRPLs. StarCoder 2 and DeepSeek-Coder models adapted to Elixir and Racket using methods such as Fast Vocabulary Transfer (FVT), FOCUS, and Zero-shot Tokenizer Transfer (ZeTT) are evaluated and compared with the original and fine-tuned models. Our experiments reveal that ZeTT outperforms other methods, achieving significant improvements in handling syntax, program logic, and data types for LRPLs. However, we also highlight performance declines in non-target languages like Python after tokenizer adaptation. The study approves the positive impact of tokenizer adaptation in enhancing LRPL code generation and suggests directions for future research, including token embeddings improvement."
}