@inproceedings{kudelya-shirnin-2025-lacuna,
title = "Lacuna Inc. at {S}em{E}val-2025 Task 4: {L}o{RA}-Enhanced Influence-Based Unlearning for {LLM}s",
author = "Kudelya, Aleksey and
Shirnin, Alexander",
editor = "Rosenthal, Sara and
Ros{\'a}, Aiala and
Ghosh, Debanjan and
Zampieri, Marcos",
booktitle = "Proceedings of the 19th International Workshop on Semantic Evaluation (SemEval-2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/corrections-2025-08/2025.semeval-1.201/",
pages = "1528--1533",
ISBN = "979-8-89176-273-2",
abstract = "This paper describes LIBU (LoRA enhanced influence-based unlearning), an algorithm to solve the task of unlearning - removing specific knowledge from a large language model without retraining from scratch and compromising its overall utility (SemEval-2025 Task 4: Unlearning sensitive content from Large Language Models). The algorithm combines classical influence functions to remove the influence of thedata from the model and second-order optimization to stabilize the overall utility. Our experiments show that this lightweight approach is well applicable for unlearning LLMs in different kinds of task."
}
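
The abstract names the ingredients (influence functions, LoRA, second-order stabilization) without spelling out the update. Purely as illustration, below is a minimal PyTorch sketch of one influence-style unlearning step restricted to LoRA weights, assuming gradient ascent on a forget batch with a damped squared-gradient preconditioner as a cheap stand-in for the inverse-Hessian-vector product that full influence functions require. All names, the toy model, and hyperparameters are hypothetical and not taken from the paper's implementation.

# Hypothetical sketch, not the paper's released code: influence-style
# unlearning restricted to LoRA parameters.
import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    """A frozen base linear layer plus a trainable low-rank (LoRA) update."""
    def __init__(self, d_in: int, d_out: int, rank: int = 4):
        super().__init__()
        self.base = nn.Linear(d_in, d_out)
        self.base.weight.requires_grad_(False)   # base model stays frozen
        self.base.bias.requires_grad_(False)
        self.A = nn.Parameter(torch.randn(rank, d_in) * 0.01)
        self.B = nn.Parameter(torch.zeros(d_out, rank))

    def forward(self, x):
        return self.base(x) + x @ self.A.T @ self.B.T

def unlearn_step(model, forget_x, forget_y, damping=1e-2, lr=1.0):
    """One influence-style update: ascend the loss on the forget batch,
    scaling each coordinate by a damped squared-gradient term (a cheap
    diagonal proxy for the inverse-Hessian preconditioning that full
    influence functions would use)."""
    loss = nn.functional.mse_loss(model(forget_x), forget_y)
    lora_params = [p for p in model.parameters() if p.requires_grad]
    grads = torch.autograd.grad(loss, lora_params)
    with torch.no_grad():
        for p, g in zip(lora_params, grads):
            p.add_(lr * g / (g.pow(2) + damping))  # ascent => forget

# Toy usage: a few unlearning steps on random "forget" data.
model = LoRALinear(8, 1)
forget_x, forget_y = torch.randn(16, 8), torch.randn(16, 1)
for _ in range(3):
    unlearn_step(model, forget_x, forget_y)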