@inproceedings{chen-etal-2025-automatic,
title = "Automatic Expert Discovery in {LLM} Upcycling via Sparse Interpolated Mixture-of-Experts",
author = "Chen, Shengzhuang and
Wei, Ying and
Schwarz, Jonathan Richard",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.816/",
pages = "16703--16717",
ISBN = "979-8-89176-251-0",
abstract = "We present Sparse Interpolated Mixture-of-Experts (SIMoE) instruction-tuning, an end-to-end algorithm designed to fine-tune a dense pre-trained Large Language Model (LLM) into a MoE-style model that possesses capabilities in multiple specialized domains. During instruction-tuning, SIMoE automatically identifies multiple specialized experts under a specified sparsity constraint, with each expert representing a structurally sparse subset of the seed LLM{'}s parameters that correspond to domain-specific knowledge within the data. SIMoE simultaneously learns an input-dependent expert merging strategy via a router network, leveraging rich cross-expert knowledge for superior downstream generalization that surpasses existing baselines. Empirically, SIMoE consistently achieves state-of-the-art performance on common instruction-tuning benchmarks while maintaining an optimal performance-compute trade-off compared to all baselines."
}
Markdown (Informal)
[Automatic Expert Discovery in LLM Upcycling via Sparse Interpolated Mixture-of-Experts](https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.816/) (Chen et al., ACL 2025)
ACL
Shengzhuang Chen, Ying Wei, and Jonathan Richard Schwarz. 2025. Automatic Expert Discovery in LLM Upcycling via Sparse Interpolated Mixture-of-Experts. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 16703–16717, Vienna, Austria. Association for Computational Linguistics.
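
Illustrative sketch (not the authors' implementation): the abstract describes experts as structurally sparse subsets of the seed LLM's parameters, merged per input by a router. The plain PyTorch sketch below shows one way such a layer could look; the class name, the row-wise masking scheme, and the batch-averaged router are all assumptions made for illustration only.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SparseInterpolatedLinear(nn.Module):
    # Sketch of the general idea: frozen seed weights plus a router-weighted
    # sum of structurally sparse expert deltas. Structural sparsity is
    # approximated here with per-expert learnable row masks.
    def __init__(self, in_features, out_features, num_experts=4):
        super().__init__()
        self.seed = nn.Linear(in_features, out_features)
        self.seed.weight.requires_grad_(False)   # frozen seed LLM weights
        self.seed.bias.requires_grad_(False)
        # One sparse update of the seed weight per expert.
        self.deltas = nn.Parameter(torch.zeros(num_experts, out_features, in_features))
        # Row-wise gate logits; a sparsity penalty on their sigmoid would
        # drive most rows of each expert delta toward zero.
        self.gate_logits = nn.Parameter(torch.zeros(num_experts, out_features))
        # Router: maps the input to per-expert merging coefficients.
        self.router = nn.Linear(in_features, num_experts)

    def forward(self, x):
        # Input-dependent merging weights, averaged over the batch here for
        # simplicity (a per-token router is equally possible).
        alpha = F.softmax(self.router(x), dim=-1).mean(dim=0)   # (E,)
        masks = torch.sigmoid(self.gate_logits).unsqueeze(-1)   # (E, out, 1)
        merged_delta = (alpha.view(-1, 1, 1) * masks * self.deltas).sum(dim=0)
        return F.linear(x, self.seed.weight + merged_delta, self.seed.bias)

    def sparsity_penalty(self):
        # Simple L1-style proxy for the sparsity constraint mentioned in the abstract.
        return torch.sigmoid(self.gate_logits).mean()

# Example usage under the same assumptions:
# layer = SparseInterpolatedLinear(16, 32, num_experts=4)
# y = layer(torch.randn(8, 16))                          # (8, 32)
# loss = y.pow(2).mean() + 0.01 * layer.sparsity_penalty()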