@inproceedings{sun-li-2025-ishumei,
title = "i{S}humei-Chinchunmei at {S}em{E}val-2025 Task 4: A balanced forgetting and retention multi-task framework using effective unlearning loss",
author = "Sun, Yujian and
Li, Tian",
editor = "Rosenthal, Sara and
Ros{\'a}, Aiala and
Ghosh, Debanjan and
Zampieri, Marcos",
booktitle = "Proceedings of the 19th International Workshop on Semantic Evaluation (SemEval-2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/transition-to-people-yaml/2025.semeval-1.181/",
pages = "1357--1369",
ISBN = "979-8-89176-273-2",
abstract = "As the Large Language Model (LLM) gains widespread adoption, increasing attention has been given to the challenge of making LLM forget non-compliant data memorized during its pre-training. Machine Unlearning focuses on efficiently erasing sensitive information from LLM under limited computational resources. To advance research in this area, SemEval 2025 Task 4: ``Unlearning Sensitive Content from Large Language Models'' introduces three unlearning datasets and establishes a benchmark by evaluating both forgetting effectiveness and the preservation of standard capabilities. In this work, we propose a more controllable forgetting loss, Effective Unlearning Loss, and explore its integration with various techniques to achieve more efficient and controlled unlearning. Our system ultimately ranked 5th on the competition leaderboard."
}