@inproceedings{jian-etal-2025-teaching,
title = "Teaching Vision-Language Models to Ask: Resolving Ambiguity in Visual Questions",
author = "Jian, Pu and
Yu, Donglei and
Yang, Wen and
Ren, Shuo and
Zhang, Jiajun",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.182/",
pages = "3619--3638",
ISBN = "979-8-89176-251-0",
    abstract = "In the visual question answering (VQA) context, users often pose ambiguous questions to visual language models (VLMs) due to varying expression habits. Existing research addresses such ambiguities primarily by rephrasing questions. These approaches neglect the inherently interactive nature of user interactions with VLMs, where ambiguities can be clarified through user feedback. However, research on interactive clarification faces two major challenges: (1) no benchmarks exist to assess VLMs' capacity for resolving ambiguities through interaction; (2) VLMs are trained to prefer answering over asking, which prevents them from seeking clarification. To overcome these challenges, we introduce the ClearVQA benchmark, which targets three common categories of ambiguity in the VQA context and encompasses various VQA scenarios. Furthermore, we propose an automated pipeline to generate ambiguity-clarification question pairs, enabling VLMs to ask reasonable clarification questions and generate more accurate and specific answers based on user feedback, as demonstrated by experimental results."
}