@inproceedings{bajpai-hanawal-2025-fastvlm,
title = "{F}ast{VLM}: Self-Speculative Decoding for Fast Vision-Language Model Inference",
author = "Bajpai, Divya Jyoti and
Hanawal, Manjesh Kumar",
editor = "Inui, Kentaro and
Sakti, Sakriani and
Wang, Haofen and
Wong, Derek F. and
Bhattacharyya, Pushpak and
Banerjee, Biplab and
Ekbal, Asif and
Chakraborty, Tanmoy and
Singh, Dhirendra Pratap",
booktitle = "Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "The Asian Federation of Natural Language Processing and The Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-long.64/",
pages = "1166--1183",
ISBN = "979-8-89176-298-5",
abstract = "Vision-language Models (VLMs) have made significant strides in visual understanding and query response generation, but often face challenges of high computational cost and inference latency due to autoregressive decoding. In this work, we introduce an imitation-learning-based Self-Speculative Decoding (SSD) framework, named FastVLM, to address these limitations. Our approach employs a lightweight draft model for token generation in an autoregressive manner, while a full model verifies these tokens non-autoregressively. Accepted tokens proceed seamlessly, while rejected tokens are corrected by the full model and used to guide the draft model{'}s refinement. Through an imitation network, FastVLM enhances the draft model by integrating deeper-level insights from the full model{'}s architecture. It also maintains the performance integrity of the full model while training the draft model, achieving a balance between efficiency and accuracy. Our method speeds up the inference process by $1.55-1.85\times$ compared to the final layer with minimal loss in performance. The source code is available at https://github.com/Div290/SSD."
}
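As a rough illustration of the draft-then-verify loop the abstract describes, below is a minimal greedy-acceptance sketch in PyTorch. It is an assumption-laden reconstruction of generic self-speculative decoding, not the authors' implementation: the function names, the `k`-token draft window, and the exact-match acceptance rule are all illustrative, and the paper's imitation-learning refinement of the draft model is omitted entirely.

```python
# Hypothetical sketch of a draft-then-verify speculative decoding loop.
# `draft_model` and `full_model` stand in for the paper's lightweight draft
# and full VLM; here they are any callables mapping token ids of shape
# (1, seq) to logits of shape (1, seq, vocab).
import torch


@torch.no_grad()
def speculative_decode(draft_model, full_model, input_ids, max_new_tokens=64, k=4):
    tokens = input_ids
    while tokens.shape[1] - input_ids.shape[1] < max_new_tokens:
        # 1) Draft phase: the lightweight model proposes k tokens
        #    autoregressively (greedy, for simplicity).
        draft = tokens
        for _ in range(k):
            next_tok = draft_model(draft)[:, -1].argmax(-1, keepdim=True)
            draft = torch.cat([draft, next_tok], dim=1)

        # 2) Verify phase: a single non-autoregressive full-model pass
        #    scores every drafted position at once.
        full_logits = full_model(draft)
        n_prev = tokens.shape[1]
        for i in range(k + 1):
            full_pred = full_logits[:, n_prev - 1 + i].argmax(-1, keepdim=True)
            if i == k or full_pred.item() != draft[0, n_prev + i].item():
                # 3) First mismatch (or all k accepted): keep the accepted
                #    prefix and append the full model's own token as the
                #    correction (or as a bonus token when all k pass).
                tokens = torch.cat([draft[:, : n_prev + i], full_pred], dim=1)
                break
    return tokens
```

With this exact-match acceptance rule, the output is identical to what greedy decoding with the full model alone would produce, while the full model runs only once per batch of up to `k` drafted tokens; the speedup then depends on how often the draft model's tokens are accepted.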