@inproceedings{dagan-etal-2025-cast,
title = "{CAST}: Cross-modal Alignment Similarity Test for Vision Language Models",
author = "Dagan, Gautier and
Loginova, Olga and
Batra, Anil",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Eugenio, Barbara Di and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2025.coling-main.93/",
pages = "1387--1402",
abstract = "Vision Language Models (VLMs) are typically evaluated with Visual Question Answering (VQA) tasks which assess a model`s understanding of scenes. Good VQA performance is taken as evidence that the model will perform well on a broader range of tasks that require both visual and language inputs. However, scene-aware VQA does not fully capture input biases or assess hallucinations caused by a misalignment between modalities. To address this, we propose a Cross-modal Alignment Similarity Test (CAST) to probe VLMs for self-consistency across modalities. This test involves asking the models to identify similarities between two scenes through text-only, image-only, or both and then assess the truthfulness of the similarities they generate. Since there is no ground-truth to compare against, this evaluation does not focus on objective accuracy but rather on whether VLMs are internally consistent in their outputs. We argue that while not all self-consistent models are capable or accurate, all capable VLMs must be self-consistent."
}
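The abstract describes the CAST procedure at a high level: generate similarities between two scenes under each modality, then have the same model verify its own claims. The sketch below is an illustrative reconstruction of that loop, not the authors' released implementation; `query_vlm`, the prompt wording, and the yes/no verdict parsing are all assumptions standing in for a real VLM interface.

```python
# Hypothetical sketch of a CAST-style self-consistency probe.
# `query_vlm` is a placeholder for any VLM call (prompt + optional images
# -> text reply); all names and prompts here are illustrative assumptions.
from typing import Callable, List

VLM = Callable[[str, List[str]], str]

def generate_similarities(
    query_vlm: VLM, scene_a: str, scene_b: str,
    images: List[str], modality: str,
) -> List[str]:
    """Ask the model to list similarities under one modality."""
    if modality == "text":
        prompt = (f"Scene 1: {scene_a}\nScene 2: {scene_b}\n"
                  "List similarities between the two scenes.")
        attachments: List[str] = []
    elif modality == "image":
        prompt = "List similarities between the two attached scenes."
        attachments = images
    else:  # both text descriptions and images
        prompt = (f"Scene 1: {scene_a}\nScene 2: {scene_b}\n"
                  "List similarities between the two attached scenes.")
        attachments = images
    reply = query_vlm(prompt, attachments)
    return [line.strip("- ").strip() for line in reply.splitlines() if line.strip()]

def self_consistency_score(
    query_vlm: VLM, similarities: List[str],
    scene_a: str, scene_b: str, images: List[str],
) -> float:
    """Fraction of generated similarities the same model later judges true.

    No ground truth is needed: the score measures agreement between the
    model's generation and its own verification, not objective accuracy.
    """
    agreed = 0
    for claim in similarities:
        verdict = query_vlm(
            f"Scene 1: {scene_a}\nScene 2: {scene_b}\n"
            f"Is the following similarity true? Answer yes or no.\n{claim}",
            images,
        )
        agreed += verdict.strip().lower().startswith("yes")
    return agreed / len(similarities) if similarities else 0.0
```

Because the verification step reuses the same model, a low score flags internal disagreement between modalities rather than factual error, which matches the paper's framing of self-consistency as a necessary (but not sufficient) condition for capability.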