@inproceedings{ding-etal-2026-objchangevr,
  title = "{ObjChangeVR}: Object State Change Reasoning from Continuous Egocentric Views in {VR} Environments",
  author = "Ding, Shiyi and
    Wu, Shaoen and
    Chen, Ying",
  editor = "Demberg, Vera and
    Inui, Kentaro and
    M{\`a}rquez, Llu{\'\i}s",
  booktitle = "Proceedings of the 19th Conference of the {European} Chapter of the {Association} for {Computational} {Linguistics} (Volume 1: Long Papers)",
  month = mar,
  year = "2026",
  address = "Rabat, Morocco",
  publisher = "Association for Computational Linguistics",
  url = "https://aclanthology.org/2026.eacl-long.272/",
  pages = "5795--5812",
  isbn = "979-8-89176-380-7",
  internal-note = "url normalised from preview.aclanthology.org staging link to the canonical Anthology URL; verify once the volume is fully ingested",
  abstract = "Recent advances in multimodal large language models (MLLMs) offer a promising approach for natural language-based scene change queries in virtual reality (VR). Prior work on applying MLLMs for object state understanding has focused on egocentric videos that capture the camera wearer{'}s interactions with objects. However, object state changes may occur in the background without direct user interaction, lacking explicit motion cues and making them difficult to detect. Moreover, no benchmark exists for evaluating this challenging scenario. To address these challenges, we introduce ObjChangeVR-Dataset, specifically for benchmarking the question-answering task of object state change. We also propose ObjChangeVR, a framework that combines viewpoint-aware and temporal-based retrieval to identify relevant frames, along with cross-view reasoning that reconciles inconsistent evidence from multiple viewpoints. Extensive experiments demonstrate that ObjChangeVR significantly outperforms baseline approaches across multiple MLLMs.",
}
Markdown (Informal)
[ObjChangeVR: Object State Change Reasoning from Continuous Egocentric Views in VR Environments](https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.272/) (Ding et al., EACL 2026)
ACL