@inproceedings{zhang-etal-2024-mar,
title = "{MAR}: Matching-Augmented Reasoning for Enhancing Visual-based Entity Question Answering",
author = "Zhang, Zhengxuan and
Wu, Yin and
Luo, Yuyu and
Tang, Nan",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.emnlp-main.91/",
doi = "10.18653/v1/2024.emnlp-main.91",
pages = "1520--1530",
abstract = "A multimodal large language model MLLMs may struggle with answering visual-based (personal) entity questions (VEQA), such as {\textquotedblright}who is A?{\textquotedblright} or {\textquotedblright}who is A that B is talking to?{\textquotedblright} for various reasons, e.g., the absence of the name of A in the caption or the inability of MLLMs to recognize A, particularly for less common entities. Furthermore, even if the MLLMs can identify A, it may refrain from answering due to privacy concerns. In this paper, we introduce a novel method called Matching-Augmented Reasoning (MAR) to enhance VEQA. Given a collection of visual objects with captions, MAR preprocesses each object individually, identifying faces, names, and their alignments within the object. It encodes this information and stores their vector representations in vector databases. When handling VEQA, MAR retrieves matching faces and names and organizes these entities into a matching graph. MAR then derives the answer to the query by reasoning over this matching graph. Extensive experiments show that MAR significantly improves VEQA compared with the state-of-the-art methods using MLLMs."
}