@inproceedings{lasy-etal-2025-understanding,
    title = "Understanding Verbatim Memorization in {LLMs} Through Circuit Discovery",
    author = "Lasy, Ilya and
      Knees, Peter and
      Woltran, Stefan",
    editor = "Jia, Robin and
      Wallace, Eric and
      Huang, Yangsibo and
      Pimentel, Tiago and
      Maini, Pratyush and
      Dankers, Verna and
      Wei, Johnny and
      Lesci, Pietro",
    booktitle = "Proceedings of the First Workshop on Large Language Model Memorization (L2M2)",
    month = aug,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.l2m2-1.7/",
    pages = "83--94",
    isbn = "979-8-89176-278-7",
    abstract = "Underlying mechanisms of memorization in LLMs{---}the verbatim reproduction of training data{---}remain poorly understood. What exact part of the network decides to retrieve a token that we would consider as start of memorization sequence? How exactly is the models' behaviour different when producing memorized sentence vs non-memorized? In this work we approach these questions from mechanistic interpretability standpoint by utilizing transformer circuits{---}the minimal computational subgraphs that perform specific functions within the model. Through carefully constructed contrastive datasets, we identify points where model generation diverges from memorized content and isolate the specific circuits responsible for two distinct aspects of memorization. We find that circuits that initiate memorization can also maintain it once started, while circuits that only maintain memorization cannot trigger its initiation. Intriguingly, memorization prevention mechanisms transfer robustly across different text domains, while memorization induction appears more context-dependent."
}
Markdown (Informal)
[Understanding Verbatim Memorization in LLMs Through Circuit Discovery](https://aclanthology.org/2025.l2m2-1.7/) (Lasy et al., L2M2 2025)
ACL