@inproceedings{wang-etal-2025-balancing,
title = "Balancing Forget Quality and Model Utility: A Reverse {KL}-Divergence Knowledge Distillation Approach for Better Unlearning in {LLM}s",
author = "Wang, Bichen and
Zi, Yuzhe and
Sun, Yixin and
Zhao, Yanyan and
Qin, Bing",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.naacl-long.60/",
pages = "1306--1321",
ISBN = "979-8-89176-189-6",
abstract = "As concern for privacy rights has grown and the size of language model training datasets has expanded, research into machine unlearning for large language models (LLMs) has become crucial. Before the era of LLMs, research on machine unlearning mainly focused on classification tasks in small parameter models. However, as parameter sizes have grown and unlearning targets have become more complex, unlearning has become more challenging, especially in scenarios involving generation instead of classification, as the output space of such models is significantly larger and more diverse. Existing methods based on gradient ascent and its variants often struggle with balancing forget quality and model utility, leading to either over unlearning or partial unlearning. To address this challenge, we propose Reverse KL-Divergence based Knowledge Distillation for Unlearning (RKLU), a novel unlearning method for LLMs. RKLU focuses on precisely unlearning the components of the token distribution related to the unlearning target, allowing us to achieve significant forget quality while maintaining model utility in our experiments."
}
Markdown (Informal)
[Balancing Forget Quality and Model Utility: A Reverse KL-Divergence Knowledge Distillation Approach for Better Unlearning in LLMs](https://preview.aclanthology.org/landing_page/2025.naacl-long.60/) (Wang et al., NAACL 2025)
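The abstract names reverse KL-divergence knowledge distillation over token distributions as the core ingredient of RKLU. As a rough illustration only, the sketch below shows how a generic reverse KL term KL(student || teacher) between the token distributions of an unlearned ("student") model and the original ("teacher") model can be computed in PyTorch. The function name `reverse_kl`, the tensor shapes, and the usage lines are assumptions for the example; the actual RKLU objective, including how target-related components of the distribution are isolated, is defined in the paper itself.

```python
# Illustrative sketch only: a generic reverse KL-divergence term between a
# student (unlearned) model's token distribution and a teacher (original)
# model's distribution. This is NOT the RKLU loss from the paper.
import torch
import torch.nn.functional as F

def reverse_kl(student_logits: torch.Tensor, teacher_logits: torch.Tensor) -> torch.Tensor:
    """KL(student || teacher), averaged over token positions.

    Forward KL, KL(teacher || student), is mode-covering; the reverse
    direction is mode-seeking, which makes it a natural choice when only
    part of the teacher's token distribution should be matched.
    Expected shapes: [batch, seq_len, vocab_size].
    """
    log_q = F.log_softmax(student_logits, dim=-1)  # student log-probs, log q
    log_p = F.log_softmax(teacher_logits, dim=-1)  # teacher log-probs, log p
    q = log_q.exp()
    # KL(q || p) = sum_v q(v) * (log q(v) - log p(v)); sum over vocab, mean over tokens
    return (q * (log_q - log_p)).sum(dim=-1).mean()

# Hypothetical usage with two causal LMs loaded elsewhere:
# student_logits = student(input_ids).logits
# teacher_logits = teacher(input_ids).logits.detach()
# loss = reverse_kl(student_logits, teacher_logits)
# loss.backward()
```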