@inproceedings{zhong-etal-2025-low,
title = "Low-Rank Interconnected Adaptation across Layers",
author = "Zhong, Yibo and
Zhao, Jinman and
Zhou, Yao",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.findings-acl.874/",
pages = "17005--17029",
ISBN = "979-8-89176-256-5",
abstract = "Low-rank adaptation (LoRA) is a widely used parameter-efficient fine-tuning (PEFT) method that learns weight updates $\Delta W = AB$ for pretrained weights $W$ through low-rank adapters $A$ and $B$. While LoRA ensures hardware efficiency, its low-rank weight updates limit adaptation performance. In this paper, we propose low-rank interconnected adaptation across layers (Lily), a novel PEFT method that introduces an interconnected framework with locally shared $A$ and globally shared $B$ experts. This structure eliminates redundant per-layer $AB$ pairs, enabling higher-rank $\Delta W$ with equal or fewer parameters. To enhance expressiveness, we use data-dependent routers to determine $A$-$B$ interconnections, preventing $B$ experts from converging to the same behavior and improving representational power across domains. Experiments across modalities, architectures, and model sizes demonstrate Lily{'}s superior performance and efficiency."
}
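The abstract above describes LoRA's low-rank update $\Delta W = AB$ and Lily's replacement of redundant per-layer $AB$ pairs with shared $B$ experts chosen by a data-dependent router. The snippet below is only a minimal PyTorch sketch of that idea, not the authors' implementation: all names (`LilyLinear`, `shared_B`, `router`, `n_experts`) are hypothetical, the $A$ projector here is simply per-layer rather than locally shared, and initialization and router training details follow the paper, not this sketch.

```python
# Minimal, illustrative sketch (not the authors' code) of a LoRA-style update
# where the up-projection B is a router-weighted mixture of experts that are
# shared across layers. All names below are hypothetical.
import torch
import torch.nn as nn
import torch.nn.functional as F

class LilyLinear(nn.Module):
    """Frozen pretrained linear layer plus a low-rank update built from a
    local A projector and a data-dependent mixture of shared B experts."""

    def __init__(self, base: nn.Linear, shared_B: nn.ParameterList, rank: int):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad_(False)                      # pretrained W stays frozen
        d_in = base.in_features
        self.A = nn.Parameter(torch.randn(d_in, rank) * 0.01)   # down-projection
        self.shared_B = shared_B                         # (rank, d_out) experts shared across layers
        self.router = nn.Linear(d_in, len(shared_B))     # data-dependent expert weights

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        weights = F.softmax(self.router(x), dim=-1)      # (..., n_experts)
        low = x @ self.A                                 # (..., rank)
        # Apply every shared B expert, then mix the results per token.
        up = torch.stack([low @ B for B in self.shared_B], dim=-1)   # (..., d_out, n_experts)
        delta = (up * weights.unsqueeze(-2)).sum(dim=-1)              # (..., d_out)
        return self.base(x) + delta

# Usage: two adapted layers draw on the same pool of B experts.
rank, d, n_experts = 4, 16, 3
shared_B = nn.ParameterList([nn.Parameter(torch.zeros(rank, d)) for _ in range(n_experts)])
layer1 = LilyLinear(nn.Linear(d, d), shared_B, rank)
layer2 = LilyLinear(nn.Linear(d, d), shared_B, rank)
out = layer2(layer1(torch.randn(2, d)))
```

Because every adapted layer in this sketch mixes from one shared pool of $B$ experts rather than keeping its own $AB$ pair, the per-layer update is no longer confined to a single fixed low-rank factorization, which is the mechanism the abstract credits for reaching a higher-rank $\Delta W$ with equal or fewer parameters.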
Markdown (Informal):
[Low-Rank Interconnected Adaptation across Layers](https://preview.aclanthology.org/landing_page/2025.findings-acl.874/) (Zhong et al., Findings 2025)

ACL:
Yibo Zhong, Jinman Zhao, and Yao Zhou. 2025. Low-Rank Interconnected Adaptation across Layers. In Findings of the Association for Computational Linguistics: ACL 2025, pages 17005–17029, Vienna, Austria. Association for Computational Linguistics.