@inproceedings{song-etal-2025-burn,
title = "Burn After Reading: Do Multimodal Large Language Models Truly Capture Order of Events in Image Sequences?",
author = "Song, Yingjin and
Du, Yupei and
Paperno, Denis and
Gatt, Albert",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/mtsummit-25-ingestion/2025.findings-acl.1248/",
doi = "10.18653/v1/2025.findings-acl.1248",
pages = "24316--24342",
ISBN = "979-8-89176-256-5",
abstract = "This paper introduces the TempVS benchmark, which focuses on temporal grounding and reasoning capabilities of Multimodal Large Language Models (MLLMs) in image sequences. TempVS consists of three main tests (i.e., event relation inference, sentence ordering and image ordering), each accompanied with a basic grounding test. TempVS requires MLLMs to rely on both visual and linguistic modalities to understand the temporal order of events. We evaluate 38 state-of-the-art MLLMs, demonstrating that models struggle to solve TempVS, with a substantial performance gap compared to human capabilities. We also provide fine-grained insights that suggest promising directions for future research. Our TempVS benchmark data and code are available at https://github.com/yjsong22/TempVS."
}