@inproceedings{jeung-etal-2025-seps,
    title     = "{SEPS}: A Separability Measure for Robust Unlearning in {LLM}s",
    author    = "Jeung, Wonje and
                 Yoon, Sangyeon and
                 No, Albert",
    editor    = "Christodoulopoulos, Christos and
                 Chakraborty, Tanmoy and
                 Rose, Carolyn and
                 Peng, Violet",
    booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
    month     = nov,
    year      = "2025",
    address   = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url       = "https://preview.aclanthology.org/name-variant-enfa-fane/2025.emnlp-main.283/",
    doi       = "10.18653/v1/2025.emnlp-main.283",
    pages     = "5556--5587",
    isbn      = "979-8-89176-332-6",
    abstract  = "Machine unlearning aims to selectively remove targeted knowledge from Large Language Models (LLMs), ensuring they forget specified content while retaining essential information. Existing unlearning metrics assess whether a model correctly answers retain queries and rejects forget queries, but they fail to capture real-world scenarios where forget queries rarely appear in isolation. In fact, forget and retain queries often coexist within the same prompt, making mixed-query evaluation crucial. We introduce SEPS, an evaluation framework that explicitly measures a model{'}s ability to both forget and retain information within a single prompt. Through extensive experiments across three benchmarks, we identify two key failure modes in existing unlearning methods: (1) untargeted unlearning indiscriminately erases both forget and retain content once a forget query appears, and (2) targeted unlearning overfits to single-query scenarios, leading to catastrophic failures when handling multiple queries. To address these issues, we propose Mixed Prompt (MP) unlearning, a strategy that integrates both forget and retain queries into a unified training objective. Our approach significantly improves unlearning effectiveness, demonstrating robustness even in complex settings with up to eight mixed forget and retain queries in a single prompt."
}

@comment{Web-export residue preserved from the original paste (not a bibliographic entry):
Markdown (Informal)
[SEPS: A Separability Measure for Robust Unlearning in LLMs](https://preview.aclanthology.org/name-variant-enfa-fane/2025.emnlp-main.283/) (Jeung et al., EMNLP 2025)
ACL
}