@inproceedings{jin-etal-2024-winoviz,
    title     = {{WINOVIZ}: Probing Visual Properties of Objects Under Different States},
    author    = {Jin, Woojeong and
                 Srinivasan, Tejas and
                 Thomason, Jesse and
                 Ren, Xiang},
    editor    = {Tafreshi, Shabnam and
                 Akula, Arjun and
                 Sedoc, Jo{\~a}o and
                 Drozd, Aleksandr and
                 Rogers, Anna and
                 Rumshisky, Anna},
    booktitle = {Proceedings of the Fifth Workshop on Insights from Negative Results in NLP},
    month     = jun,
    year      = {2024},
    address   = {Mexico City, Mexico},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2024.insights-1.14/},
    doi       = {10.18653/v1/2024.insights-1.14},
    pages     = {110--123},
    abstract  = {Humans interpret visual aspects of objects based on contexts. For example, a banana appears brown when rotten and green when unripe. Previous studies focused on language models' grasp of typical object properties. We introduce WINOVIZ, a text-only dataset with 1,380 examples of probing language models' reasoning about diverse visual properties under different contexts. Our task demands pragmatic and visual knowledge reasoning. We also present multi-hop data, a more challenging version requiring multi-step reasoning chains. Experimental findings include: a) GPT-4 excels overall but struggles with multi-hop data. b) Large models perform well in pragmatic reasoning but struggle with visual knowledge reasoning. c) Vision-language models outperform language-only models.},
}
@comment{Markdown (Informal):
[WINOVIZ: Probing Visual Properties of Objects Under Different States](https://aclanthology.org/2024.insights-1.14/) (Jin et al., insights 2024)
ACL}