@inproceedings{kobus-gunduz-2025-speculative,
title = "Speculative Sampling via Exponential Races",
author = "Kobus, Szymon and
Gunduz, Deniz",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/display_plenaries/2025.findings-acl.936/",
pages = "18189--18204",
ISBN = "979-8-89176-256-5",
abstract = "Speculative decoding accelerates large language model inference using a smaller draft model. In this paper, we establish a surprising connection between speculative sampling and the concept of channel simulation from information theory, which aims at simulating a noisy channel using as few bits as possible. This connection allows us to provide an information-theoretic analysis of the speed up that can be achieved by speculative sampling. Leveraging this link, we derive an explicit relation between generation speed-up and the number of tokens $k$ generated by the draft model for large $k$, which serves as an upper bound for all $k$. We also propose a novel speculative sampling method via exponential races called ERSS that matches state-of-the-art performance."
}
Markdown (Informal):
[Speculative Sampling via Exponential Races](https://aclanthology.org/2025.findings-acl.936/) (Kobus & Gunduz, Findings 2025)

ACL:
Szymon Kobus and Deniz Gunduz. 2025. Speculative Sampling via Exponential Races. In Findings of the Association for Computational Linguistics: ACL 2025, pages 18189–18204, Vienna, Austria. Association for Computational Linguistics.
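For readers unfamiliar with the primitive the title refers to, below is a minimal Python sketch of sampling via an exponential race (equivalent to the Gumbel-max trick): each outcome `i` draws an arrival time `E_i / p_i` with `E_i ~ Exp(1)`, and the earliest arrival wins, which selects `i` with probability `p_i`. The `coupled_race` helper, the toy distributions `p` and `q`, and the shared-randomness coupling are illustrative assumptions meant to hint at the connection to speculative sampling described in the abstract; this is not the paper's ERSS algorithm.

```python
import numpy as np

def exponential_race_sample(p, rng):
    """Sample index i with probability p[i] via an exponential race:
    each outcome gets an Exp(1) arrival time scaled by 1/p[i], and the
    earliest arrival wins (equivalent to the Gumbel-max trick)."""
    e = rng.exponential(scale=1.0, size=len(p))  # E_i ~ Exp(1)
    return int(np.argmin(e / p))                 # winner ~ Categorical(p)

def coupled_race(p, q, rng):
    """Run one race with shared randomness for a target p and a draft q.
    Each winner is exactly distributed according to its own distribution,
    and the two winners coincide often when p and q are close -- the kind
    of coupling speculative sampling exploits."""
    e = rng.exponential(scale=1.0, size=len(p))
    return int(np.argmin(e / p)), int(np.argmin(e / q))

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    p = np.array([0.70, 0.20, 0.10])   # toy target (large-model) distribution
    q = np.array([0.60, 0.30, 0.10])   # toy draft (small-model) distribution
    draws = [coupled_race(p, q, rng) for _ in range(100_000)]
    target_wins, draft_wins = zip(*draws)
    print("empirical target dist:", np.bincount(target_wins) / len(draws))
    print("agreement rate:", np.mean(np.array(target_wins) == np.array(draft_wins)))
```

Running the script should print an empirical distribution close to `p` and an agreement rate well above chance, illustrating why a draft distribution close to the target lets many race winners be kept.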