@inproceedings{xu-etal-2025-refreshkv,
title = "{R}efresh{KV}: Updating Small {KV} Cache During Long-form Generation",
author = "Xu, Fangyuan and
Goyal, Tanya and
Choi, Eunsol",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
  Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.acl-long.1211/",
pages = "24878--24893",
ISBN = "979-8-89176-251-0",
abstract = "Generating long sequences of tokens given a long-context input is a very compute-intensive inference scenario for large language models (LLMs). One prominent inference speed-up approach is constructing a smaller key-value (KV) cache, relieving LLMs from computing attention over a long sequence of tokens. While such methods work well to generate short sequences, their performance degrades rapidly for long-form generation. Most KV compression happens once, prematurely removing tokens that can be useful later in the generation. We propose a new inference-time method, RefreshKV, that flexibly alternates between full context attention and attention over a subset of input tokens during generation. After each full attention step, we update the smaller KV cache based on the attention pattern over the entire input. Applying our method to off-the-shelf LLMs achieves comparable speedup to eviction-based methods while improving performance for various long-form generation tasks. Lastly, we show that continued pretraining with our inference setting brings further gains in performance."
}
Markdown (Informal)
[RefreshKV: Updating Small KV Cache During Long-form Generation](https://preview.aclanthology.org/landing_page/2025.acl-long.1211/) (Xu et al., ACL 2025)
ACL
Fangyuan Xu, Tanya Goyal, and Eunsol Choi. 2025. [RefreshKV: Updating Small KV Cache During Long-form Generation](https://preview.aclanthology.org/landing_page/2025.acl-long.1211/). In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 24878–24893, Vienna, Austria. Association for Computational Linguistics.
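
The abstract above describes the inference loop at a high level: alternate between full-context attention steps and cheaper steps over a small KV subset, refreshing the subset from the attention pattern of each full step. The snippet below is a minimal illustrative NumPy sketch of that idea, not the authors' implementation; the function names, the fixed refresh interval, and the top-k selection rule are assumptions made for illustration.

```python
# Illustrative sketch of the RefreshKV idea from the abstract (assumptions:
# single head, fixed refresh interval, top-k selection by attention weight).
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def generate(query_states, full_keys, full_values,
             small_cache_size=64, refresh_every=8):
    """Alternate full attention (every `refresh_every` steps) with attention
    over a small KV subset; after each full step, rebuild the subset from the
    most-attended input tokens."""
    d = full_keys.shape[-1]
    small_idx = np.arange(min(small_cache_size, full_keys.shape[0]))
    outputs = []
    for step, q in enumerate(query_states):
        if step % refresh_every == 0:
            # Full-attention step: attend over the entire input ...
            scores = softmax(q @ full_keys.T / np.sqrt(d))
            # ... and refresh the small cache with the top-attended tokens.
            small_idx = np.argsort(-scores)[:small_cache_size]
            out = scores @ full_values
        else:
            # Cheap step: attend only over the current small KV subset.
            k, v = full_keys[small_idx], full_values[small_idx]
            scores = softmax(q @ k.T / np.sqrt(d))
            out = scores @ v
        outputs.append(out)
    return np.stack(outputs)

# Toy usage: 100 generation steps over a 1,024-token context, head dim 64.
rng = np.random.default_rng(0)
out = generate(rng.standard_normal((100, 64)),
               rng.standard_normal((1024, 64)),
               rng.standard_normal((1024, 64)))
```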