@inproceedings{chhabra-etal-2025-neuroplasticity,
title = "Neuroplasticity and Corruption in Model Mechanisms: A Case Study Of Indirect Object Identification",
author = "Chhabra, Vishnu Kabir and
Zhu, Ding and
Khalili, Mohammad Mahdi",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Ingest-2025-COMPUTEL/2025.findings-naacl.170/",
pages = "3099--3122",
ISBN = "979-8-89176-195-7",
abstract = "Previous research has shown that fine-tuning language models on general tasks enhance their underlying mechanisms. However, the impact of fine-tuning on poisoned data and the resulting changes in these mechanisms are poorly understood. This study investigates the changes in a model`s mechanisms during toxic fine-tuning and identifies the primary corruption mechanisms. We also analyze the changes after retraining a corrupted model on the original dataset and observe neuroplasticity behaviors, where the model relearns original mechanisms after fine-tuning the corrupted model. Our findings indicate that; (i) Underlying mechanisms are amplified across task-specific fine-tuning which can be generalized to longer epochs, (ii) Model corruption via toxic fine-tuning is localized to specific circuit components, (iii) Models exhibit neuroplasticity when retraining corrupted models on clean dataset, reforming the original model mechanisms."
}