@inproceedings{lizzo-heck-2025-unlearn,
title = "{UNLEARN} Efficient Removal of Knowledge in Large Language Models",
author = "Lizzo, Tyler and
Heck, Larry",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/moar-dois/2025.findings-naacl.405/",
doi = "10.18653/v1/2025.findings-naacl.405",
pages = "7257--7268",
ISBN = "979-8-89176-195-7",
abstract = "Large Language Models (LLMs) excel in many Natural Language Processing tasks but are outperformed by specialized tools for certain tasks. This raises the question: Can we reduce redundant LLM parameters when using these tools? Given the size and high training costs of LLMs, it is essential to efficiently forget specific knowledge without retraining. This paper introduces UNLEARN, a novel method that uses subspace techniques to selectively remove knowledge without access to the original training data, without retraining, and with minimal impact to other tasks. Our results show that UNLEARN significantly outperforms previous methods for forgetting targeted (unwanted) knowledge while also preserving related (wanted) knowledge. We also propose LEARN, a complementary approach for targeted knowledge addition, which achieves fine-tuning accuracy comparable to Low-Rank Adaptation (LoRA) without degrading related task performance."
}