@inproceedings{ma-etal-2025-cacheback,
title = "Cacheback: Speculative Decoding With Nothing But Cache",
author = "Ma, Zhiyao and
Gim, In and
Zhong, Lin",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1581/",
pages = "31067--31072",
ISBN = "979-8-89176-332-6",
abstract = "We present Cacheback Decoding, a training-free and model-agnostic speculative decoding method that exploits the locality in language to accelerate Large Language Model (LLM) inference.Cacheback leverages only Least Recently Used (LRU) cache tables of token n-grams to generate draft sequences.Cacheback achieves state-of-the-art performance among comparable methods despite its minimalist design, and its simplicity allows easy integration into existing systems.Cacheback also shows potential for fast adaptation to new domains."
}