@inproceedings{song-etal-2025-vf,
title = "{VF}-Eval: Evaluating Multimodal {LLM}s for Generating Feedback on {AIGC} Videos",
author = "Song, Tingyu and
Hu, Tongyan and
Gan, Guo and
Zhao, Yilun",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.1027/",
pages = "21126--21146",
    isbn = "979-8-89176-251-0",
    abstract = "Recently, multimodal large language models (MLLMs) have been extensively explored in video question answering. However, most existing assessments focus on natural videos, overlooking synthetic videos (e.g., AI-generated content). Meanwhile, some works in video generation rely on MLLMs to evaluate the quality of generated videos, but the capabilities of MLLMs on AIGC videos remain largely underexplored. To address this, we propose a new benchmark, VF-Eval, which introduces four tasks{---}coherence validation, error awareness, error type detection, and reasoning evaluation{---}to comprehensively evaluate the abilities of MLLMs on AIGC videos. We evaluate 13 frontier MLLMs on VF-Eval and find that even the best-performing model, GPT-4.1, struggles to achieve consistently good performance across all tasks. This highlights the challenging nature of our benchmark. Additionally, to investigate the practical applications of VF-Eval in improving video generation, we design a re-prompt pipeline, demonstrating that aligning MLLMs more closely with human feedback can benefit video generation."
}