@inproceedings{zhou-etal-2025-cola,
title = "{C}o{LA}: Collaborative Low-Rank Adaptation",
author = "Zhou, Yiyun and
Yao, Chang and
Chen, Jingyuan",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/corrections-2025-08/2025.findings-acl.726/",
doi = "10.18653/v1/2025.findings-acl.726",
pages = "14115--14130",
ISBN = "979-8-89176-256-5",
abstract = "The scaling law of Large Language Models (LLMs) reveals a power-law relationship, showing diminishing return on performance as model scale increases. While training LLMs from scratch is resource-intensive, fine-tuning a pre-trained model for specific tasks has become a practical alternative. Full fine-tuning (FFT) achieves strong performance; however, it is computationally expensive and inefficient. Parameter-efficient fine-tuning (PEFT) methods, like LoRA, have been proposed to address these challenges by freezing the pre-trained model and adding lightweight task-specific modules. LoRA, in particular, has proven effective, but its application to multi-task scenarios is limited by interference between tasks. Recent approaches, such as Mixture-of-Experts (MOE) and asymmetric LoRA, have aimed to mitigate these issues but still struggle with sample scarcity and noise interference due to their fixed structure. In response, we propose CoLA, a more flexible LoRA architecture with an efficient initialization scheme, which introduces three collaborative strategies to enhance performance by better utilizing the quantitative relationships between matrices $A$ and $B$. Our experiments demonstrate the effectiveness and robustness of CoLA, outperforming existing PEFT methods, especially in low-sample scenarios. Our data and code are fully publicly available: https://github.com/zyy-2001/CoLA."
}
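For readers unfamiliar with the adapter scheme the abstract builds on, the sketch below shows a generic LoRA-style linear layer: a frozen pre-trained weight plus the trainable low-rank factors $A$ and $B$ that CoLA's collaborative strategies operate on. This is not the authors' CoLA implementation (see the linked GitHub repository for that); it is a minimal illustration in PyTorch, and the `rank` and `alpha` hyperparameters are illustrative assumptions.

```python
# Minimal sketch of a LoRA-style adapter (generic low-rank adaptation, NOT the CoLA method).
# Assumes PyTorch; `rank` and `alpha` are illustrative hyperparameters, not values from the paper.
import torch
import torch.nn as nn


class LoRALinear(nn.Module):
    def __init__(self, in_features: int, out_features: int, rank: int = 8, alpha: float = 16.0):
        super().__init__()
        # Frozen pre-trained weight W (stands in for a weight loaded from an LLM checkpoint).
        self.weight = nn.Parameter(torch.empty(out_features, in_features), requires_grad=False)
        nn.init.normal_(self.weight, std=0.02)
        # Trainable low-rank factors: A projects down to rank r, B projects back up.
        self.lora_A = nn.Parameter(torch.zeros(rank, in_features))
        self.lora_B = nn.Parameter(torch.zeros(out_features, rank))
        nn.init.normal_(self.lora_A, std=0.02)  # A is randomly initialized
        nn.init.zeros_(self.lora_B)             # B starts at zero, so training starts from W
        self.scaling = alpha / rank

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # y = x W^T + scaling * (x A^T) B^T, i.e. W is adapted by the low-rank update B A.
        base = x @ self.weight.T
        update = (x @ self.lora_A.T) @ self.lora_B.T
        return base + self.scaling * update


if __name__ == "__main__":
    layer = LoRALinear(in_features=768, out_features=768, rank=8)
    out = layer(torch.randn(2, 16, 768))
    print(out.shape)  # torch.Size([2, 16, 768])
```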
Markdown (Informal)
[CoLA: Collaborative Low-Rank Adaptation](https://preview.aclanthology.org/corrections-2025-08/2025.findings-acl.726/) (Zhou et al., Findings 2025)
ACL
Yiyun Zhou, Chang Yao, and Jingyuan Chen. 2025. CoLA: Collaborative Low-Rank Adaptation. In Findings of the Association for Computational Linguistics: ACL 2025, pages 14115–14130, Vienna, Austria. Association for Computational Linguistics.