@inproceedings{pirhadi-etal-2025-cvt5,
    title = "{CVT5}: Using Compressed Video Encoder and {UMT5} for Dense Video Captioning",
    author = "Pirhadi, Mohammad Javad and
      Mirzaei, Motahhare and
      Eetemadi, Sauleh",
    editor = "Zhang, Wei Emma and
      Dai, Xiang and
      Elliot, Desmond and
      Fang, Byron and
      Sim, Mongyuan and
      Zhuang, Haojie and
      Chen, Weitong",
    booktitle = "Proceedings of the First Workshop of Evaluation of Multi-Modal Generation",
    month = jan,
    year = "2025",
    address = "Abu Dhabi, UAE",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.evalmg-1.2/",
    pages = "10--23",
    abstract = "The dense video captioning task aims to detect all events occurring in a video and describe each event using natural language. Unlike most other video processing tasks, where it is typically assumed that videos contain only a single main event, this task deals with long, untrimmed videos. Consequently, the speed of processing videos in dense video captioning is a critical aspect of the system. To the best of our knowledge, all published work on this task uses RGB frames to encode input videos. In this work, we introduce the use of compressed videos for the first time in this task. Our experiments on the SoccerNet challenge demonstrate significant improvements in both processing speed and GPU memory footprint while achieving competitive results. Additionally, we leverage multilingual transcripts, which seems to be effective. The encoder in our proposed method achieves approximately 5.4{\texttimes} higher speed and 5.1{\texttimes} lower GPU memory usage during training, and 4.7{\texttimes} higher speed and 7.8{\texttimes} lower GPU memory usage during inference, compared to its RGB-based counterpart. The code is publicly available at https://github.com/mohammadjavadpirhadi/CVT5."
}
Markdown (Informal)
[CVT5: Using Compressed Video Encoder and UMT5 for Dense Video Captioning](https://aclanthology.org/2025.evalmg-1.2/) (Pirhadi et al., EvalMG 2025)
ACL