@inproceedings{dou-etal-2025-avoiding,
title = "Avoiding Copyright Infringement via Large Language Model Unlearning",
author = "Dou, Guangyao and
Liu, Zheyuan and
Lyu, Qing and
Ding, Kaize and
Wong, Eric",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-naacl.288/",
pages = "5176--5200",
ISBN = "979-8-89176-195-7",
abstract = "Pre-trained Large Language Models (LLMs) have demonstrated remarkable capabilities but also pose risks by learning and generating copyrighted material, leading to significant legal and ethical concerns. In real-world scenarios, model owners need to continuously address copyright infringement as new requests for content removal emerge at different time points. This leads to the need for sequential unlearning, where copyrighted content is removed sequentially as new requests arise. Despite its practical relevance, sequential unlearning in the context of copyright infringement has not been rigorously explored in existing literature. To address this gap, we propose Stable Sequential Unlearning (SSU), a novel framework designed to unlearn copyrighted content from LLMs over multiple time steps. Our approach works by identifying and removing specific weight updates in the model{'}s parameters that correspond to copyrighted content. We improve unlearning efficacy by introducing random labeling loss and ensuring the model retains its general-purpose knowledge by adjusting targeted parameters. Experimental results show that SSU achieves an effective trade-off between unlearning efficacy and general-purpose language abilities, outperforming existing baselines."
}