@inproceedings{huang-etal-2025-mitigating,
title = "Mitigating Catastrophic Forgetting in Large Language Models with Forgetting-aware Pruning",
author = "Huang, Wei and
Cheng, Anda and
Wang, Yinggui",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1108/",
pages = "21853--21867",
ISBN = "979-8-89176-332-6",
abstract = "Recent advancements in large language models (LLMs) have shown impressive capabilities in various downstream tasks but typically face Catastrophic Forgetting (CF) during fine-tuning. In this paper, we propose the Forgetting-Aware Pruning Metric (FAPM), a novel pruning-based approach to balance CF and downstream task performance. Our investigation reveals that the degree to which task vectors (i.e., the subtraction of pre-trained weights from the weights fine-tuned on downstream tasks) overlap with pre-trained model parameters is a critical factor for CF. Based on this finding, FAPM employs the ratio of the task vector to pre-trained model parameters as a metric to quantify CF, integrating this measure into the pruning criteria. Importantly, FAPM does not necessitate modifications to the training process or model architecture, nor does it require any auxiliary data. We conducted extensive experiments across eight datasets, covering natural language inference, General Q{\&}A, Medical Q{\&}A, Math Q{\&}A, reading comprehension, and cloze tests. The results demonstrate that FAPM limits CF to just 0.25{\%} while maintaining 99.67{\%} accuracy on downstream tasks. We provide the codes of FAPM at an anonymous repository(https://anonymous.4open.science/r/FAPM-65CF)."
}
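For readers who want to experiment with the idea summarized in the abstract, the sketch below illustrates one plausible reading of the forgetting-aware criterion: scoring each task-vector entry by |ΔW| / |W_pre| and dropping the highest-ratio entries before merging back into the pre-trained weights. The function name, the per-tensor granularity, the top-k selection, and the sparsity parameter are assumptions made for illustration; they are not the paper's exact FAPM procedure.

```python
import torch

def fapm_prune_sketch(w_pre: torch.Tensor, w_ft: torch.Tensor,
                      sparsity: float = 0.5) -> torch.Tensor:
    """Illustrative sketch of a forgetting-aware pruning step.

    w_pre: pre-trained weights; w_ft: weights fine-tuned on the downstream task.
    The task vector is w_ft - w_pre (as defined in the abstract). Here the ratio
    |delta| / |w_pre| serves as a proxy for how strongly each update perturbs the
    pre-trained model; the highest-ratio entries are pruned. The published FAPM
    criterion may weight or combine this ratio differently -- see the paper.
    """
    delta = w_ft - w_pre                        # task vector
    eps = 1e-8                                  # avoid division by zero
    ratio = delta.abs() / (w_pre.abs() + eps)   # forgetting-aware score per parameter

    n = delta.numel()
    k = min(int(sparsity * n), n - 1)           # number of task-vector entries to prune
    if k > 0:
        # Threshold at the (n - k)-th smallest ratio, so entries above it are the top-k.
        threshold = ratio.flatten().kthvalue(n - k).values
        delta = torch.where(ratio > threshold, torch.zeros_like(delta), delta)

    return w_pre + delta                        # merged weights after pruning
```

As a usage note, such a step would typically be applied layer by layer to the fine-tuned checkpoint after training, which matches the abstract's claim that the method needs no changes to the training process, model architecture, or auxiliary data.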