@inproceedings{ed-dib-etal-2025-gelora,
title = "{G}e{L}o{RA}: Geometric Adaptive Ranks For Efficient {L}o{RA} Fine-tuning",
author = "Ed-dib, Abdessalam and
Datbayev, Zhanibek and
Aboussalah, Amine M.",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.1372/",
doi = "10.18653/v1/2025.findings-emnlp.1372",
pages = "25174--25196",
ISBN = "979-8-89176-335-7",
abstract = "Fine-tuning large language models (LLMs) is computationally expensive because it requires updating all model parameters. Low-Rank Adaptation (LoRA) reduces this cost by modifying a subset of weights, but selecting the appropriate rank introduces a trade-off: lower ranks improve efficiency at the expense of expressivity, while higher ranks enhance performance but increase computational burden. Existing adaptive LoRA methods lack a theoretical foundation to guide this trade-off optimally. We propose Geometric Low-Rank Adaptation (GeLoRA), a principled approach that estimates the intrinsic dimensionality of hidden data representations to adaptively select LoRA ranks. We show theoretically and empirically that the intrinsic dimension serves as a lower bound for the optimal rank of LoRA matrices, enabling a balance between efficiency and expressivity. Extensive experiments on GLUE, SQuAD (with DeBERTa), and MT-Bench (with LLaMA) demonstrate that GeLoRA consistently outperforms recent adaptive LoRA methods by up to +1.0{\%}, while simultaneously reducing computational time by 13.5{\%} to 64.2{\%}, depending on the baseline, under the same parameter budget."
}
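
A minimal, hypothetical sketch of the idea described in the abstract (not the authors' released code): estimate the intrinsic dimension of each layer's hidden representations with a TwoNN-style estimator and use it as a per-layer lower bound when assigning LoRA ranks. The function names and the proportional allocation of the remaining rank budget are illustrative assumptions, not GeLoRA's exact procedure.

```python
# Hypothetical sketch: intrinsic-dimension-guided LoRA rank assignment.
# Assumes TwoNN (Facco et al., 2017) as the intrinsic dimension estimator;
# the budget allocation below is an illustrative heuristic, not the paper's algorithm.
import numpy as np
from sklearn.neighbors import NearestNeighbors


def twonn_intrinsic_dimension(x: np.ndarray) -> float:
    """TwoNN MLE: estimate ID from ratios of 2nd to 1st nearest-neighbor distances."""
    nn = NearestNeighbors(n_neighbors=3).fit(x)
    dist, _ = nn.kneighbors(x)          # dist[:, 0] is the point itself
    r1, r2 = dist[:, 1], dist[:, 2]
    mask = r1 > 0                       # drop duplicate points
    mu = r2[mask] / r1[mask]
    return mask.sum() / np.log(mu).sum()


def assign_lora_ranks(hidden_states: list[np.ndarray], budget: int) -> list[int]:
    """Use each layer's ID as a rank lower bound, then spread any leftover
    rank budget across layers in proportion to their IDs (simple heuristic)."""
    ids = np.array([twonn_intrinsic_dimension(h) for h in hidden_states])
    ranks = np.ceil(ids).astype(int)                    # per-layer lower bound
    leftover = max(budget - int(ranks.sum()), 0)
    extra = np.floor(leftover * ids / ids.sum()).astype(int)
    return (ranks + extra).tolist()


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    # Toy stand-in for per-layer hidden states: (num_tokens, hidden_size)
    layers = [rng.normal(size=(512, 64)) @ rng.normal(size=(64, 768)) for _ in range(4)]
    print(assign_lora_ranks(layers, budget=256))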