@inproceedings{tater-etal-2025-absvis,
title = "{A}bs{V}is {--} Benchmarking How Humans and Vision-Language Models ``See'' Abstract Concepts in Images",
author = "Tater, Tarun and
Frassinelli, Diego and
Schulte im Walde, Sabine",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.417/",
pages = "8271--8292",
ISBN = "979-8-89176-332-6",
    abstract = "Abstract concepts like mercy and peace often lack clear visual grounding, and thus challenge humans and models to provide suitable image representations. To address this challenge, we introduce AbsVis {--} a dataset of 675 images annotated with 14,175 concept{--}explanation attributions from humans and two Vision-Language Models (VLMs: Qwen and LLaVA), where each concept is accompanied by a textual explanation. We compare human and VLM attributions in terms of diversity, abstractness, and alignment, and find that humans attribute more varied concepts. AbsVis also includes 2,680 human preference judgments evaluating the quality of a subset of these annotations, showing that overlapping concepts (attributed by both humans and VLMs) are most preferred. Explanations clarify and strengthen the perceived attributions, both from humans and VLMs. Finally, we show that VLMs can approximate human preferences and use them to fine-tune VLMs via Direct Preference Optimization (DPO), yielding improved alignment with preferred concept{--}explanation pairs."
}