@inproceedings{wang-etal-2025-vigil3d,
    title     = {{ViGiL3D}: A Linguistically Diverse Dataset for {3D} Visual Grounding},
    author    = {Wang, Austin and
                 Gong, ZeMing and
                 Chang, Angel X},
    editor    = {Che, Wanxiang and
                 Nabende, Joyce and
                 Shutova, Ekaterina and
                 Pilehvar, Mohammad Taher},
    booktitle = {Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
    month     = jul,
    year      = {2025},
    address   = {Vienna, Austria},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.acl-long.1470/},
    pages     = {30453--30475},
    isbn      = {979-8-89176-251-0},
    abstract  = {3D visual grounding (3DVG) involves localizing entities in a 3D scene referred to by natural language text. Such models are useful for embodied AI and scene retrieval applications, which involve searching for objects or patterns using natural language descriptions. While recent works have focused on LLM-based scaling of 3DVG datasets, these datasets do not capture the full range of potential prompts which could be specified in the English language. To ensure that we are scaling up and testing against a useful and representative set of prompts, we propose a framework for linguistically analyzing 3DVG prompts and introduce Visual Grounding with Diverse Language in 3D (ViGiL3D), a diagnostic dataset for evaluating visual grounding methods against a diverse set of language patterns. We evaluate existing open-vocabulary 3DVG methods to demonstrate that these methods are not yet proficient in understanding and identifying the targets of more challenging, out-of-distribution prompts, toward real-world applications.}
}
@comment{
Markdown (Informal):
[ViGiL3D: A Linguistically Diverse Dataset for 3D Visual Grounding](https://aclanthology.org/2025.acl-long.1470/) (Wang et al., ACL 2025)
}