@inproceedings{li-etal-2025-group,
title = "Group then Scale: Dynamic Mixture-of-Experts Multilingual Language Model",
author = "Li, Chong and
Deng, Yingzhuo and
Zhang, Jiajun and
Zong, Chengqing",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/mtsummit-25-ingestion/2025.findings-acl.87/",
doi = "10.18653/v1/2025.findings-acl.87",
pages = "1730--1754",
ISBN = "979-8-89176-256-5",
abstract = "The curse of multilinguality phenomenon is a fundamental problem of multilingual Large Language Models (LLMs), where the competition between massive languages results in inferior performance. It mainly comes from limited capacity and negative transfer between dissimilar languages. To address this issue, we propose a method to dynamically group and scale up the parameters of multilingual LLM while boosting positive transfer among similar languages. Specifically, the model is first tuned on monolingual corpus to determine the parameter deviation in each layer and quantify the similarity between languages. Layers with more deviations are extended to mixture-of-experts layers to reduce competition between languages, where one expert module serves one group of similar languages. Experimental results on 18 to 128 languages show that our method reduces the negative transfer between languages and significantly boosts multilingual performance with fewer parameters. Such language group specialization on experts benefits the new language adaptation and reduces the inference on the previous multilingual knowledge learned."
}
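
The abstract outlines the core procedure: tune on each language's monolingual data, measure the per-layer parameter deviation, group languages whose deviation profiles are similar, and extend the most-deviating layers into mixture-of-experts layers with one expert per language group. Below is a minimal NumPy sketch of that grouping and layer-selection step; the function names, the similarity threshold, and the toy data are illustrative assumptions, not the authors' implementation.

# Minimal sketch of the "group then scale" idea described in the abstract.
# All names, thresholds, and shapes are illustrative assumptions.
import numpy as np

def group_languages(deviation, sim_threshold=0.8):
    """Greedily group languages with similar per-layer deviation profiles.

    deviation: (num_languages, num_layers) array, e.g. the L2 norm of the
    weight change in each layer after tuning on that language's data.
    """
    # Cosine similarity between language deviation profiles.
    normed = deviation / np.linalg.norm(deviation, axis=1, keepdims=True)
    sim = normed @ normed.T

    groups, assigned = [], set()
    for lang in range(deviation.shape[0]):
        if lang in assigned:
            continue
        # Start a new group and pull in all still-unassigned languages
        # that are similar enough to this one.
        members = [lang] + [
            other for other in range(deviation.shape[0])
            if other not in assigned and other != lang
            and sim[lang, other] >= sim_threshold
        ]
        assigned.update(members)
        groups.append(members)
    return groups

def select_moe_layers(deviation, num_moe_layers=4):
    """Pick the layers with the largest average deviation across languages;
    these are the candidates to extend into MoE layers, with one expert
    per language group."""
    layer_score = deviation.mean(axis=0)
    return np.argsort(layer_score)[::-1][:num_moe_layers].tolist()

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    dev = rng.random((6, 12))  # toy deviations: 6 languages x 12 layers
    print("language groups:", group_languages(dev))
    print("layers to extend to MoE:", select_moe_layers(dev))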