@inproceedings{ning-etal-2025-mode,
title = "{M}o{DE}: Effective Multi-task Parameter Efficient Fine-Tuning with a Mixture of Dyadic Experts",
author = "Ning, Lin and
Lara, Harsh and
Guo, Meiqi and
Rastogi, Abhinav",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.findings-naacl.460/",
pages = "8233--8246",
ISBN = "979-8-89176-195-7",
abstract = "Parameter-efficient fine-tuning techniques like Low-Rank Adaptation (LoRA) have revolutionized the adaptation of large language models (LLMs) to diverse tasks. Recent efforts have explored mixtures of LoRA modules for multi-task settings. However, our analysis reveals redundancy in the down-projection matrices of these architectures. This observation motivates our proposed method, Mixture of Dyadic Experts (MoDE), which introduces a novel design for efficient multi-task adaptation. This is done by sharing the down-projection matrix across tasks and employing atomic rank-one adapters, coupled with routers that allow more sophisticated task-level specialization. Our design allows for more fine-grained mixing, thereby increasing the model{'}s ability to jointly handle multiple tasks. We evaluate MoDE on the Supernatural Instructions (SNI) benchmark consisting of a diverse set of 700+ tasks and demonstrate that it outperforms state-of-the-art multi-task parameter-efficient fine-tuning (PEFT) methods, without introducing additional parameters. Our findings contribute to a deeper understanding of parameter efficiency in multi-task LLM adaptation and provide a practical solution for deploying high-performing, lightweight models."
}
Markdown (Informal)
[MoDE: Effective Multi-task Parameter Efficient Fine-Tuning with a Mixture of Dyadic Experts](https://aclanthology.org/2025.findings-naacl.460/) (Ning et al., Findings 2025)
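The abstract above outlines MoDE's layer design: a single down-projection shared across experts, a bank of atomic rank-one up-projection vectors, and a router that mixes them at a fine granularity. Below is a minimal, illustrative PyTorch sketch of such a layer written from the abstract alone; the module name `MoDEStyleAdapter`, the per-rank-slot routing, and all hyperparameters are assumptions for illustration, not the authors' implementation.

```python
import torch
import torch.nn as nn


class MoDEStyleAdapter(nn.Module):
    """LoRA-style adapter with a down-projection shared across experts and a
    router-weighted mixture of rank-one up-projection vectors (a sketch of the
    design described in the abstract, not the authors' code)."""

    def __init__(self, d_in: int, d_out: int, rank: int, num_experts: int):
        super().__init__()
        self.rank = rank
        self.num_experts = num_experts
        # Single down-projection shared by all experts.
        self.down = nn.Linear(d_in, rank, bias=False)
        # num_experts rank-one up-projection vectors per rank slot;
        # zero init keeps the adapter a no-op before training (standard LoRA practice).
        self.up = nn.Parameter(torch.zeros(rank, num_experts, d_out))
        # Token-level router: one expert distribution per rank slot
        # (an assumed reading of the "more fine-grained mixing" in the abstract).
        self.router = nn.Linear(d_in, rank * num_experts, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, seq, d_in)
        z = self.down(x)  # (batch, seq, rank)
        logits = self.router(x).view(*x.shape[:-1], self.rank, self.num_experts)
        gates = torch.softmax(logits, dim=-1)  # (batch, seq, rank, num_experts)
        # delta[b, s, d] = sum_{r, e} z[b, s, r] * gates[b, s, r, e] * up[r, e, d]
        return torch.einsum("bsr,bsre,red->bsd", z, gates, self.up)


# Example usage: the adapter's output would be added to the frozen base
# layer's output, as in standard LoRA.
adapter = MoDEStyleAdapter(d_in=768, d_out=768, rank=8, num_experts=4)
delta = adapter(torch.randn(2, 16, 768))  # (2, 16, 768)
```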