@inproceedings{sastre-rosa-2025-memory,
title = "Memory Tokens: Large Language Models Can Generate Reversible Sentence Embeddings",
author = "Sastre, Ignacio and
Ros{\'a}, Aiala",
editor = "Jia, Robin and
Wallace, Eric and
Huang, Yangsibo and
Pimentel, Tiago and
Maini, Pratyush and
Dankers, Verna and
Wei, Johnny and
Lesci, Pietro",
booktitle = "Proceedings of the First Workshop on Large Language Model Memorization (L2M2)",
month = aug,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.l2m2-1.14/",
doi = "10.18653/v1/2025.l2m2-1.14",
pages = "183--189",
ISBN = "979-8-89176-278-7",
abstract = "In this work, we observe an interesting phenomenon: it is possible to generate reversible sentence embeddings that allow an LLM to reconstruct the original text exactly, without modifying the model{'}s weights. This is achieved by introducing a special memory token, whose embedding is optimized through training on a fixed sequence. When prompted with this embedding, the model reconstructs the fixed sequence exactly. We evaluate this phenomenon across English and Spanish datasets, sequences of up to approximately 240 tokens, and model scales ranging from 100M to 8B parameters. Notably, Llama 3.1 8B successfully reconstructs all tested sequences. Our findings highlight an interesting capability of LLMs and suggest potential applications in memory-based retrieval, compression, and controlled text generation."
}
Markdown (Informal)
[Memory Tokens: Large Language Models Can Generate Reversible Sentence Embeddings](https://aclanthology.org/2025.l2m2-1.14/) (Sastre & Rosá, L2M2 2025)
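
The abstract describes a simple mechanism: keep the LLM's weights frozen, introduce one trainable "memory token" embedding, and optimize that single vector so that decoding from it alone regenerates a fixed target sequence. Below is a minimal, hypothetical sketch of that idea in PyTorch with Hugging Face Transformers. The model choice (`gpt2` as a small stand-in), learning rate, step count, and initialization are illustrative assumptions, not the paper's actual setup.

```python
# Sketch of the memory-token idea from the abstract: train one embedding
# against a frozen causal LM so greedy decoding from it reproduces a
# fixed sequence. Hyperparameters here are assumptions, not the paper's.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # assumption: any causal LM works as a stand-in
tok = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()
for p in model.parameters():  # freeze the LLM; only the memory
    p.requires_grad_(False)   # token's embedding is optimized

target = "The quick brown fox jumps over the lazy dog."
target_ids = tok(target, return_tensors="pt").input_ids  # (1, T)
wte = model.get_input_embeddings()
target_emb = wte(target_ids)                             # (1, T, d)

# One trainable vector, initialized near the embedding scale (assumption).
mem = torch.nn.Parameter(target_emb.mean(dim=1, keepdim=True).detach().clone())
opt = torch.optim.Adam([mem], lr=1e-2)

for step in range(500):
    opt.zero_grad()
    # Prepend the memory token and teacher-force the fixed sequence.
    inputs = torch.cat([mem, target_emb], dim=1)         # (1, 1+T, d)
    logits = model(inputs_embeds=inputs).logits
    # Output positions 0..T-1 predict target tokens 0..T-1.
    loss = torch.nn.functional.cross_entropy(logits[0, :-1, :], target_ids[0])
    loss.backward()
    opt.step()

# Reconstruction: greedy decoding conditioned only on the memory token.
with torch.no_grad():
    emb, out = mem, []
    for _ in range(target_ids.shape[1]):
        next_id = model(inputs_embeds=emb).logits[0, -1].argmax()
        out.append(next_id.item())
        emb = torch.cat([emb, wte(next_id.view(1, 1))], dim=1)
print(tok.decode(out))  # should match `target` once the loss is low
```

This only demonstrates the training objective and the decoding loop; per the abstract, reconstruction fidelity varies with model scale, with Llama 3.1 8B reconstructing all tested sequences.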