@inproceedings{lai-chen-2025-neko,
title = "{NEKO} at {S}em{E}val-2025 Task 4: A Gradient Ascent Based Machine Unlearning Strategy",
author = "Lai, Chi Kuan and
Chen, Yifei",
editor = "Rosenthal, Sara and
Ros{\'a}, Aiala and
Ghosh, Debanjan and
Zampieri, Marcos",
booktitle = "Proceedings of the 19th International Workshop on Semantic Evaluation (SemEval-2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/corrections-2025-08/2025.semeval-1.64/",
pages = "463--467",
ISBN = "979-8-89176-273-2",
abstract = "The power and wide application of large language models (LLMs) have raised concerns about their risk of leaking private or sensitive information. However, retraining the models is expensive and impractical, which motivates machine unlearning: removing specific information from a language model while preserving its general utility. Task 4 at SemEval-2025 is a shared task with this exact objective. We present an approach that combines gradient ascent-based forgetting with Kullback-Leibler (KL) divergence-based retention, applied to a 1-billion-parameter causal language model. Despite achieving effective forgetting, the system struggles to maintain model utility. Our experiments reveal a critical trade-off between unlearning effectiveness and performance preservation, highlighting the challenges of practical machine unlearning implementations."
}
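The abstract describes an objective that pairs gradient ascent on a forget set with a KL-divergence retention term on a retain set. Below is a minimal sketch of that general recipe, not the authors' actual implementation: it assumes a Hugging Face-style causal LM whose forward pass accepts `labels` and returns a `.loss`, a frozen copy of the original model as the KL reference, and an illustrative `kl_weight` hyperparameter.

```python
import torch
import torch.nn.functional as F

def unlearning_step(model, ref_model, forget_batch, retain_batch,
                    optimizer, kl_weight=1.0):
    """One update combining gradient ascent on the forget set with
    KL-divergence retention against a frozen reference model.
    Hypothetical sketch; names and weighting are illustrative."""
    model.train()

    # Gradient ascent on the forget set: maximize the LM loss
    # by minimizing its negation.
    forget_out = model(**forget_batch, labels=forget_batch["input_ids"])
    forget_loss = -forget_out.loss

    # KL retention on the retain set: keep the model's token
    # distribution close to that of the frozen reference model.
    retain_out = model(**retain_batch)
    with torch.no_grad():
        ref_out = ref_model(**retain_batch)
    kl_loss = F.kl_div(
        F.log_softmax(retain_out.logits, dim=-1),
        F.log_softmax(ref_out.logits, dim=-1),
        log_target=True,
        reduction="batchmean",
    )

    loss = forget_loss + kl_weight * kl_loss
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return forget_loss.item(), kl_loss.item()
```

As the paper reports, the two terms pull in opposite directions: raising `kl_weight` preserves utility but slows forgetting, while lowering it forgets faster at the cost of general performance.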