@inproceedings{zhang-etal-2025-neuroada,
title = "{N}euro{A}da: Activating Each Neuron{'}s Potential for Parameter-Efficient Fine-Tuning",
author = "Zhang, Zhi and
Shen, Yixian and
Cao, Congfeng and
Shutova, Ekaterina",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.555/",
pages = "10960--10977",
ISBN = "979-8-89176-332-6",
abstract = "Existing parameter-efficient fine-tuning (PEFT) methods primarily fall into two categories: addition-based and selective in-situ adaptation. The former, such as LoRA, introduce additional modules to adapt the model to downstream tasks, offering strong memory efficiency. However, their representational capacity is often limited, making them less suitable for fine-grained adaptation. In contrast, the latter directly fine-tunes a carefully chosen subset of the original model parameters, allowing for more precise and effective adaptation, but at the cost of significantly increased memory consumption. To reconcile this trade-off, we propose NeuroAda, a novel PEFT method that enables fine-grained model finetuning while maintaining high memory efficiency. Our approach first identifies important parameters (i.e., connections within the network) as in selective adaptation, and then introduces bypass connections for these selected parameters. During finetuning, only the bypass connections are updated, leaving the original model parameters frozen. Empirical results on 23+ tasks spanning both natural language generation and understanding demonstrate that NeuroAda achieves state-of-the-art performance with as little as $\leq \textbf{0.02}\%$ trainable parameters, while reducing CUDA memory usage by up to 60{\%}. We release our code here: \url{https://github.com/FightingFighting/NeuroAda.git}."
}
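As a rough illustration of the bypass-connection idea the abstract describes, the sketch below wraps a frozen linear layer, selects a small fraction of connections by weight magnitude (an assumed importance score; the paper's own selection criterion may differ), and trains only a delta restricted to those positions. The names (`BypassLinear`, `topk_ratio`) are illustrative and not taken from the released code.

```python
import torch
import torch.nn as nn


class BypassLinear(nn.Module):
    """Frozen linear layer plus a sparse, trainable bypass on selected weights.

    Hedged sketch: magnitude-based selection and a dense delta tensor are
    assumptions for readability; a practical implementation would store only
    the selected entries to realize the memory savings.
    """

    def __init__(self, base: nn.Linear, topk_ratio: float = 0.0002):
        super().__init__()
        self.base = base
        for p in self.base.parameters():  # keep the original weights frozen
            p.requires_grad_(False)

        w = base.weight.detach()
        k = max(1, int(topk_ratio * w.numel()))
        # binary mask over the k largest-magnitude connections
        idx = torch.topk(w.abs().flatten(), k).indices
        mask = torch.zeros(w.numel(), device=w.device)
        mask[idx] = 1.0
        self.register_buffer("mask", mask.view_as(w))

        # trainable bypass; only masked entries affect the forward pass,
        # so unmasked entries receive zero gradient
        self.delta = nn.Parameter(torch.zeros_like(w))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        w_eff = self.base.weight + self.mask * self.delta
        return nn.functional.linear(x, w_eff, self.base.bias)


if __name__ == "__main__":
    layer = BypassLinear(nn.Linear(64, 64))
    out = layer(torch.randn(2, 64))
    print(out.shape)
```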