@inproceedings{cohen-etal-2025-performance,
title = "Performance Gap in Entity Knowledge Extraction Across Modalities in Vision Language Models",
author = "Cohen, Ido and
Gottesman, Daniela and
Geva, Mor and
Giryes, Raja",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.1411/",
pages = "29095--29108",
ISBN = "979-8-89176-251-0",
abstract = "Vision-language models (VLMs) excel at extracting and reasoning about information from images. Yet, their capacity to leverage internal knowledge about specific entities remains underexplored. This work investigates the disparity in model performance when answering factual questions about an entity described in text versus depicted in an image. Our results reveal a significant accuracy drop {---} reaching 18{\%} for some models {---} when the entity is presented visually instead of textually. To study this gap we present PopVQA, a dataset which allows separating entity recognition and question answering, and use it to benchmark several models. We hypothesize that this decline arises from limitations in how information flows from image tokens to query tokens. Thus, we use mechanistic interpretability tools to reveal that, although image tokens are preprocessed by the vision encoder, meaningful information flow from these tokens occurs only in the much deeper layers. Furthermore, critical image processing happens in the language model{'}s middle layers, allowing few layers for consecutive reasoning, highlighting a potential inefficiency in how the model utilizes its layers for reasoning. These insights shed light on the internal mechanics of VLMs and offer pathways for enhancing their reasoning capabilities. PopVQA can be found at https://huggingface.co/datasets/idoco/PopVQA."
}
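The abstract points to the PopVQA dataset on the Hugging Face Hub (https://huggingface.co/datasets/idoco/PopVQA). Below is a minimal sketch of loading and inspecting it with the `datasets` library; only the repo id comes from the citation, so the split and column names printed are assumptions resolved at runtime rather than documented schema.

# Minimal sketch: load PopVQA from the Hugging Face Hub and inspect it.
# Only the repo id ("idoco/PopVQA") is given in the abstract; splits, configs,
# and columns are not specified here, so we simply print what load_dataset returns.
from datasets import load_dataset

# If PopVQA defines multiple configurations, a config name would need to be
# passed as the second argument to load_dataset.
popvqa = load_dataset("idoco/PopVQA")   # DatasetDict keyed by split name
print(popvqa)                           # split names, column names, row counts

for split_name, split in popvqa.items():
    print(split_name, split.column_names)
    print(split[0])                     # first example of the split
    break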