@inproceedings{yuan-etal-2025-merge,
title = "Merge Hijacking: Backdoor Attacks to Model Merging of Large Language Models",
author = "Yuan, Zenghui and
Xu, Yangming and
Shi, Jiawen and
Zhou, Pan and
Sun, Lichao",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.1571/",
pages = "32688--32703",
ISBN = "979-8-89176-251-0",
abstract = "Model merging for Large Language Models (LLMs) directly fuses the parameters of different models finetuned on various tasks, creating a unified model for multi-domain tasks. However, due to potential vulnerabilities in models available on open-source platforms, model merging is susceptible to backdoor attacks. In this paper, we propose $\textit{Merge Hijacking}$, the first backdoor attack targeting model merging in LLMs. The attacker constructs a malicious upload model and releases it. Once a victim user merges it with any other models, the resulting merged model inherits the backdoor while maintaining utility across tasks. Merge Hijacking defines two main objectives{---}effectiveness and utility{---}and achieves them through four steps. Extensive experiments demonstrate the effectiveness of our attack across different models, merging algorithms, and tasks. Additionally, we show that the attack remains effective even when merging real-world models. Moreover, our attack demonstrates robustness against two inference-time defenses (Paraphrasing and CLEANGEN) and one training-time defense (Fine-pruning)."
}
Markdown (Informal)
[Merge Hijacking: Backdoor Attacks to Model Merging of Large Language Models](https://aclanthology.org/2025.acl-long.1571/) (Yuan et al., ACL 2025)