@inproceedings{yang-etal-2024-large,
title = "Can Large Multimodal Models Uncover Deep Semantics Behind Images?",
author = "Yang, Yixin and
Li, Zheng and
Dong, Qingxiu and
Xia, Heming and
Sui, Zhifang",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2024",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.findings-acl.113/",
doi = "10.18653/v1/2024.findings-acl.113",
pages = "1898--1912",
abstract = "Understanding the deep semantics of images is essential in the era dominated by social media. However, current research works primarily on the superficial description of images, revealing a notable deficiency in the systematic investigation of the inherent deep semantics. In this work, we introduce DEEPEVAL, a comprehensive benchmark to assess Large Multimodal Models' (LMMs) capacities of visual deep semantics. DEEPEVAL includes human-annotated dataset and three progressive subtasks: fine-grained description selection, in-depth title matching, and deep semantics understanding. Utilizing DEEPEVAL, we evaluate 9 open-source LMMs and GPT-4V(ision). Our evaluation demonstrates a substantial gap between the deep semantic comprehension capabilities of existing LMMs and humans. For example, GPT-4V is 30{\%} behind humans in understanding deep semantics, even though it achieves human-comparable performance in image description. Further analysis reveals that LMM performance on DEEPEVAL varies according to the specific facets of deep semantics explored, indicating the fundamental challenges remaining in developing LMMs."
}
Markdown (Informal)
[Can Large Multimodal Models Uncover Deep Semantics Behind Images?](https://aclanthology.org/2024.findings-acl.113/) (Yang et al., Findings 2024)