@inproceedings{wei-etal-2025-flexora,
    title     = {{Flexora}: Flexible Low-Rank Adaptation for Large Language Models},
    author    = {Wei, Chenxing and
      Shu, Yao and
      He, Ying Tiffany and
      Yu, Fei},
    editor    = {Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher},
    booktitle = {Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
    month     = jul,
    year      = {2025},
    address   = {Vienna, Austria},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.acl-long.713/},
    pages     = {14643--14682},
    isbn      = {979-8-89176-251-0},
    abstract  = {Large language models (LLMs) have revolutionized artificial intelligence, but their performance on specific tasks is often limited by knowledge boundaries. While fine-tuning techniques like low-rank adaptation (LoRA) aim to address this, they can suffer from overfitting. We propose flexible low-rank adaptation (Flexora), a novel method that automatically selects the most critical layers for fine-tuning to optimize performance across diverse downstream tasks. Flexora formulates layer selection as a hyperparameter optimization problem, employs unrolled differentiation for efficient solving, and identifies the most impactful layers based on optimized hyperparameters. Extensive experiments across various pre-trained models and natural language tasks demonstrate that Flexora consistently outperforms existing baselines. We provide theoretical insights and comprehensive ablation studies to elucidate the effectiveness of Flexora. Therefore, Flexora offers a robust solution to enhance LoRA fine-tuning for LLMs, potentially advancing the field of adaptive language model optimization.}
}
Markdown (Informal)
[Flexora: Flexible Low-Rank Adaptation for Large Language Models](https://aclanthology.org/2025.acl-long.713/) (Wei et al., ACL 2025)
ACL
- Chenxing Wei, Yao Shu, Ying Tiffany He, and Fei Yu. 2025. Flexora: Flexible Low-Rank Adaptation for Large Language Models. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 14643–14682, Vienna, Austria. Association for Computational Linguistics.