@inproceedings{wu-etal-2025-glyphpattern,
title = "{G}lyph{P}attern: An Abstract Pattern Recognition for Vision-Language Models",
author = "Wu, Zixuan and
Kim, Yoolim and
Anderson, Carolyn Jane",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/display_plenaries/2025.findings-acl.63/",
pages = "1140--1175",
ISBN = "979-8-89176-256-5",
abstract = "Vision-Language Models (VLMs) have made rapid progress in reasoning across visual and textual data. While VLMs perform well on vision tasks that they are trained on, our results highlight key challenges in abstract pattern recognition. We present GlyphPattern, a 954 item dataset that pairs 318 human-written descriptions of visual patterns from 40 writing systems with three visual presentation styles.GlyphPattern evaluates abstract pattern recognition in VLMs, requiring models to understand and judge natural language descriptions of visual patterns. GlyphPattern patterns are drawn from a large-scale cognitive science investigation of human writing systems; as a result, they are rich in spatial reference and compositionality. Our experiments show that GlyphPattern is challenging for state-of-the-art VLMs (GPT-4o achieves only 55{\%} accuracy), with marginal gains from few-shot prompting. Our detailed analysis reveals errors at multiple levels, including visual processing, natural language understanding, and pattern generalization."
}