@inproceedings{williams-aletras-2025-vocabulary,
title = "Vocabulary-level Memory Efficiency for Language Model Fine-tuning",
author = "Williams, Miles and
Aletras, Nikolaos",
editor = "Adlakha, Vaibhav and
Chronopoulou, Alexandra and
Li, Xiang Lorraine and
Majumder, Bodhisattwa Prasad and
Shi, Freda and
Vernikos, Giorgos",
booktitle = "Proceedings of the 10th Workshop on Representation Learning for NLP (RepL4NLP-2025)",
month = may,
year = "2025",
address = "Albuquerque, NM",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.repl4nlp-1.14/",
pages = "185--196",
ISBN = "979-8-89176-245-9",
abstract = "The extensive memory footprint of language model (LM) fine-tuning poses a challenge for both researchers and practitioners. LMs use an embedding matrix to represent extensive vocabularies, forming a substantial proportion of the model parameters. While previous work towards memory-efficient fine-tuning has focused on minimizing the number of trainable parameters, reducing the memory footprint of the embedding matrix has yet to be explored. We first demonstrate that a significant proportion of the vocabulary remains unused during fine-tuning. We then propose a simple yet effective approach that leverages this finding to minimize memory usage. We show that our approach provides substantial reductions in memory usage across a wide range of models and tasks. Notably, our approach does not impact downstream task performance, while allowing more efficient use of computational resources."
}
[Vocabulary-level Memory Efficiency for Language Model Fine-tuning](https://preview.aclanthology.org/landing_page/2025.repl4nlp-1.14/) (Williams & Aletras, RepL4NLP 2025)
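The abstract above describes the core idea rather than the implementation: embedding rows for vocabulary items that never occur in the fine-tuning data need not be kept in the trainable embedding matrix. The sketch below is a minimal illustration of that idea in plain PyTorch, not the authors' code; the toy embedding size, the synthetic corpus, and the helper names (`build_vocab_subset`, `prune_embedding`, `remap_ids`) are assumptions for demonstration only.

```python
# Minimal, illustrative sketch of vocabulary-level embedding pruning for fine-tuning.
# Assumptions (not from the paper): a toy nn.Embedding standing in for an LM's
# embedding matrix, and a token-ID remapping built from the fine-tuning corpus.
import torch
import torch.nn as nn


def build_vocab_subset(token_id_batches):
    """Collect the set of token IDs that actually occur in the fine-tuning data."""
    used = set()
    for ids in token_id_batches:
        used.update(int(i) for i in ids)
    return sorted(used)


def prune_embedding(embedding: nn.Embedding, used_ids):
    """Keep only the embedding rows for used token IDs; return the smaller
    embedding and an old-ID -> new-ID lookup table."""
    index = torch.tensor(used_ids, dtype=torch.long)
    pruned = nn.Embedding(len(used_ids), embedding.embedding_dim)
    with torch.no_grad():
        pruned.weight.copy_(embedding.weight[index])
    remap = {old: new for new, old in enumerate(used_ids)}
    return pruned, remap


def remap_ids(ids, remap):
    """Translate original token IDs into the compact pruned-vocabulary IDs."""
    return torch.tensor([remap[int(i)] for i in ids], dtype=torch.long)


if __name__ == "__main__":
    full_vocab, dim = 50_000, 768
    embedding = nn.Embedding(full_vocab, dim)  # stand-in for an LM's embedding matrix

    # Tokenized fine-tuning examples (toy data): only a small slice of the vocabulary appears.
    corpus = [torch.randint(0, 2_000, (128,)) for _ in range(100)]

    used_ids = build_vocab_subset(corpus)
    pruned, remap = prune_embedding(embedding, used_ids)

    print(f"embedding parameters: {embedding.weight.numel():,} -> {pruned.weight.numel():,}")

    # During fine-tuning, inputs are remapped into the compact ID space before lookup.
    batch = remap_ids(corpus[0], remap)
    vectors = pruned(batch)  # (128, 768) embedded tokens
    print(vectors.shape)
```

In a real setup the same remapping would also have to be applied to any weight-tied output projection, and the pruned rows restored (or the mapping saved) so the fine-tuned model can still be used with the original tokenizer.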