@inproceedings{guo-etal-2025-nlora,
title = {{NL}o{RA}: Nystr{\"o}m-Initiated Low-Rank Adaptation for Large Language Models},
author = "Guo, Chenlu and
Chang, Yi and
Wu, Yuan",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.72/",
pages = "1371--1385",
ISBN = "979-8-89176-335-7",
abstract = {Parameter-efficient fine-tuning (PEFT) is essential for adapting large language models (LLMs), with low-rank adaptation (LoRA) being the most popular approach. However, LoRA suffers from slow convergence, and some recent LoRA variants, such as PiSSA, rely primarily on Singular Value Decomposition (SVD) for initialization, which makes their computation expensive. To mitigate these problems, we resort to the Nystr{\"o}m method, which follows a three-matrix manipulation. We therefore first introduce StructuredLoRA (SLoRA), which inserts a small intermediate matrix between the low-rank matrices $A$ and $B$. Second, we propose Nystr{\"o}mLoRA (NLoRA), which leverages Nystr{\"o}m-based initialization for SLoRA to improve its effectiveness and efficiency. Finally, we propose IntermediateTune (IntTune), which fine-tunes only the intermediate matrix of NLoRA to further boost LLM efficiency. We evaluate our methods on 5 natural language generation (NLG) tasks and 8 natural language understanding (NLU) tasks. On GSM8K, SLoRA and NLoRA achieve accuracies of 56.48{\%} and 57.70{\%}, surpassing LoRA by 33.52{\%} and 36.41{\%} with only 3.67M additional trainable parameters. IntTune boosts average NLG performance over LoRA by 7.45{\%} while using only 1.25{\%} of its parameters. These results demonstrate the efficiency and effectiveness of our approach in enhancing model performance with minimal parameter overhead.}
}

Markdown (Informal)
[NLoRA: Nyström-Initiated Low-Rank Adaptation for Large Language Models](https://aclanthology.org/2025.findings-emnlp.72/) (Guo et al., Findings 2025)
ACL
Chenlu Guo, Yi Chang, and Yuan Wu. 2025. [NLoRA: Nyström-Initiated Low-Rank Adaptation for Large Language Models](https://aclanthology.org/2025.findings-emnlp.72/). In *Findings of the Association for Computational Linguistics: EMNLP 2025*, pages 1371–1385, Suzhou, China. Association for Computational Linguistics.
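
For readers skimming the abstract, the sketch below illustrates the three-matrix structure it describes: a small r × r intermediate matrix placed between the usual LoRA factors A and B, with an IntTune-style option of training only that matrix. This is a minimal, hypothetical PyTorch sketch, not the authors' released code; the class name, initialization, and scaling are assumptions, and NLoRA's Nyström-based initialization is not reproduced here.

```python
# Hypothetical sketch of an SLoRA/IntTune-style adapter layer (not the paper's code).
# The update is B @ M @ A instead of LoRA's B @ A, where M is the small
# r x r intermediate matrix. Initialization and scaling below are illustrative
# assumptions; NLoRA's Nystrom-based initialization is omitted.
import torch
import torch.nn as nn


class StructuredLoRALinear(nn.Module):
    def __init__(self, base: nn.Linear, rank: int = 8, alpha: float = 16.0):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad = False  # frozen pretrained weight

        in_f, out_f = base.in_features, base.out_features
        self.A = nn.Parameter(torch.randn(rank, in_f) * 0.01)  # down-projection
        self.M = nn.Parameter(torch.eye(rank))                 # small intermediate matrix
        self.B = nn.Parameter(torch.zeros(out_f, rank))        # up-projection, zero init
        self.scaling = alpha / rank

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # y = W x + scaling * B (M (A x))
        return self.base(x) + self.scaling * (x @ self.A.T @ self.M.T @ self.B.T)


if __name__ == "__main__":
    layer = StructuredLoRALinear(nn.Linear(64, 64), rank=8)
    # IntTune-style variant: train only the tiny intermediate matrix M.
    for name, p in layer.named_parameters():
        p.requires_grad = name == "M"
    y = layer(torch.randn(2, 64))
    print(y.shape)  # torch.Size([2, 64])
```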