@inproceedings{frank-allaway-2025-visage,
    title = "{VISaGE}: Understanding Visual Generics and Exceptions",
    author = "Frank, Stella and
      Allaway, Emily",
    editor = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.emnlp-main.1655/",
    doi = "10.18653/v1/2025.emnlp-main.1655",
    pages = "32537--32546",
    isbn = "979-8-89176-332-6",
    abstract = "While Vision Language Models (VLMs) learn conceptual representations, in the form of generalized knowledge, during training, they are typically used to analyze individual instances. When evaluation instances are atypical, this paradigm results in tension between two priors in the model. The first is a pragmatic prior that the textual and visual input are both relevant, arising from VLM finetuning on congruent inputs; the second is a semantic prior that the conceptual representation is generally true for instances of the category. In order to understand how VLMs trade off these priors, we introduce a new evaluation dataset, VISaGE, consisting of both typical and exceptional images. In carefully balanced experiments, we show that conceptual understanding degrades when the assumption of congruency underlying the pragmatic prior is violated with incongruent images. This effect is stronger than the effect of the semantic prior when querying about individual instances."
}

Markdown (Informal)
[VISaGE: Understanding Visual Generics and Exceptions](https://aclanthology.org/2025.emnlp-main.1655/) (Frank & Allaway, EMNLP 2025)
ACL