@inproceedings{zhang-etal-2025-merge,
  title     = {Merge then Realign: Simple and Effective Modality-Incremental Continual Learning for Multimodal {LLM}s},
  author    = {Zhang, Dingkun and
               Qi, Shuhan and
               Xiao, Xinyu and
               Chen, Kehai and
               Wang, Xuan},
  editor    = {Christodoulopoulos, Christos and
               Chakraborty, Tanmoy and
               Rose, Carolyn and
               Peng, Violet},
  booktitle = {Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing},
  month     = nov,
  year      = {2025},
  address   = {Suzhou, China},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.emnlp-main.665/},
  doi       = {10.18653/v1/2025.emnlp-main.665},
  pages     = {13159--13175},
  isbn      = {979-8-89176-332-6},
  abstract  = {Recent advances in Multimodal Large Language Models (MLLMs) have enhanced their versatility as they integrate a growing number of modalities. Considering the heavy cost of training MLLMs, it is efficient to reuse the existing ones and extend them to more modalities through Modality-incremental Continual Learning (MCL). The exploration of MCL is in its early stages. In this work, we dive into the causes of performance degradation in MCL. We uncover that it suffers not only from forgetting as in traditional continual learning, but also from misalignment between the modality-agnostic and modality-specific components. To this end, we propose an elegantly simple MCL paradigm called ``MErge then ReAlign'' (MERA) to address both forgetting and misalignment. MERA avoids introducing heavy model budgets or modifying model architectures, hence is easy to deploy and highly reusable in the MLLM community. Extensive experiments demonstrate the impressive performance of MERA, holding an average of 99.84{\%} Backward Relative Gain when extending to four modalities, achieving nearly lossless MCL performance. Our findings underscore the misalignment issue in MCL. More broadly, our work showcases how to adjust different components of MLLMs during continual learning.},
}
[Merge then Realign: Simple and Effective Modality-Incremental Continual Learning for Multimodal LLMs](https://aclanthology.org/2025.emnlp-main.665/) (Zhang et al., EMNLP 2025)
ACL