@inproceedings{naskar-etal-2025-ju,
title = "{JU}-{CSE}-{NLP}{'}25 at {S}em{E}val-2025 Task 4: Learning to Unlearn {LLM}s",
author = "Naskar, Arkajyoti and
Das, Dipankar and
Bandyopadhyay, Sivaji",
editor = "Rosenthal, Sara and
Ros{\'a}, Aiala and
Ghosh, Debanjan and
Zampieri, Marcos",
booktitle = "Proceedings of the 19th International Workshop on Semantic Evaluation (SemEval-2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/transition-to-people-yaml/2025.semeval-1.267/",
pages = "2059--2064",
ISBN = "979-8-89176-273-2",
abstract = "Large Language Models (LLMs) have achieved enormous success recently due to their ability to understand and solve various non-trivial tasks in natural language. However, they have been shown to memorize their training data which, among other concerns, increases the risk of the model regurgitating creative or private content, potentially leading to legal issues for the model developer and/or vendors. Such issues are often discovered post-model training during testing or red teaming. While unlearning has been studied for some time in classification problems, it is still a relatively underdeveloped area of study in LLM research since the latter operates in a potentially unbounded output label space. Specifically, robust evaluation frameworks are lacking to assess the accuracy of these unlearning strategies. In this challenge, we aim to bridge this gap by developing a comprehensive evaluation challenge for unlearning sensitive datasets in LLMs."
}