@inproceedings{hu-etal-2025-exploring,
title = "Exploring Model Kinship for Merging Large Language Models",
author = "Hu, Yedi and
Yao, Yunzhi and
Zhang, Ningyu and
Chen, Huajun and
Deng, Shumin",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-luhme/2025.findings-emnlp.32/",
doi = "10.18653/v1/2025.findings-emnlp.32",
pages = "596--625",
ISBN = "979-8-89176-335-7",
abstract = "Model merging has become one of the key technologies for enhancing the capabilities and efficiency of Large Language Models (LLMs). The open-source community has driven model evolution by iteratively merging existing models. However, a principled understanding of the expected gains and underlying factors in model merging remains lacking. In this work, we examine model evolution through continual merging, analogous to biological evolution, and introduce the concept of model kinship, the degree of similarity or relatedness between LLMs. With comprehensive empirical analysis, we find that there is a certain relationship between model kinship and the performance gains after model merging, which can help guide our selection of candidate models. Inspired by this, we propose a new model merging strategy: Top-$k$ Greedy Merging with Model Kinship, which can yield better performance on benchmark datasets. Specifically, we discover that using model kinship as a criterion can assist us in continuously performing model merging, alleviating the degradation (local optima) in model evolution, whereas model kinship can serve as a guide to escape these traps."
}