@inproceedings{li-etal-2024-loran,
    title = "{LoRAN}: Improved Low-Rank Adaptation by a Non-Linear Transformation",
    author = "Li, Yinqiao and
      Song, Linqi and
      Hou, Hanxu",
    editor = "Al-Onaizan, Yaser and
      Bansal, Mohit and
      Chen, Yun-Nung",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
    month = nov,
    year = "2024",
    address = "Miami, Florida, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.findings-emnlp.177/",
    doi = "10.18653/v1/2024.findings-emnlp.177",
    pages = "3134--3143",
    abstract = "In this paper, we study parameter-efficient fine-tuning methods for large pre-trained models. Specifically, we improve LoRA approaches to alleviate the performance loss from the constrained adapter by introducing a non-linear transformation (call it LoRAN). For a better adaptation, we also design a new non-linear function to appropriately fit the accumulated weight updates. We test our method in multiple advanced large language models. Experimental results show that our LoRAN significantly outperforms a strong baseline on SAMSum and 20 Newsgroups tasks. Moreover, when a lower rank is applied, our approach even yields a 1.95-point improvement in the classification task."
}
Markdown (Informal)
[LoRAN: Improved Low-Rank Adaptation by a Non-Linear Transformation](https://aclanthology.org/2024.findings-emnlp.177/) (Li et al., Findings 2024)
ACL