@inproceedings{khan-etal-2025-brit,
title = "{BRIT}: Bidirectional Retrieval over Unified Image-Text Graph",
author = "Khan, Ainulla and
Yamada, Moyuru and
Akella, Srinidhi",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-luhme/2025.findings-emnlp.1211/",
doi = "10.18653/v1/2025.findings-emnlp.1211",
pages = "22232--22248",
ISBN = "979-8-89176-335-7",
abstract = "Retrieval-Augmented Generation (RAG) has emerged as a promising technique to enhance the quality and relevance of responses generated by large language models. While recent advancements have mainly focused on improving RAG for text-based queries, RAG on multi-modal documents containing both texts and images has not been fully explored. Especially when fine-tuning does not work. This paper proposes BRIT, a novel multi-modal RAG framework that effectively unifies various text-image connections in the document into a multi-modal graph and retrieves the texts and images as a query-specific sub-graph. By traversing both image-to-text and text-to-image paths in the graph, BRIT retrieve not only directly query-relevant images and texts but also further relevant contents to answering complex cross-modal multi-hop questions. To evaluate the effectiveness of BRIT, we introduce MM-RAG test set specifically designed for multi-modal question answering tasks that require to understand the text-image relations. Our comprehensive experiments demonstrate the superiority of BRIT, highlighting its ability to handle cross-modal questions on the multi-modal documents."
}