@inproceedings{liu-etal-2025-video,
title = "Video Compression Commander: Plug-and-Play Inference Acceleration for Video Large Language Models",
author = "Liu, Xuyang and
Wang, Yiyu and
Ma, Junpeng and
Zhang, Linfeng",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.98/",
pages = "1910--1924",
ISBN = "979-8-89176-332-6",
  abstract = "Video large language models (VideoLLMs) excel at video understanding, but face efficiency challenges due to the quadratic complexity of abundant visual tokens. Our systematic analysis of token compression methods for VideoLLMs reveals two critical issues: \textbf{(i)} overlooking distinctive visual signals across frames, leading to information loss; \textbf{(ii)} suffering from implementation constraints, causing incompatibility with modern architectures or efficient operators. To address these challenges, we distill three design principles for VideoLLM token compression and propose a plug-and-play inference acceleration framework ``\textbf{Vid}eo \textbf{Com}pression \textbf{Com}mander'' (\textbf{VidCom$^2$}). By quantifying each frame{'}s uniqueness, VidCom$^2$ adaptively adjusts compression intensity across frames, effectively preserving essential information while reducing redundancy in video sequences. Extensive experiments across various VideoLLMs and benchmarks demonstrate the superior performance and efficiency of our VidCom$^2$. With only \textbf{25{\%}} of visual tokens, VidCom$^2$ achieves \textbf{99.6{\%}} of the original performance on LLaVA-OV while reducing LLM generation latency by \textbf{70.8{\%}}. Notably, our Frame Compression Adjustment strategy is compatible with other token compression methods and can further improve their performance. Our code is available at \url{https://github.com/xuyang-liu16/VidCom2}."
}