@inproceedings{bai-etal-2025-ramqa,
    title     = {{RAMQA}: A Unified Framework for Retrieval-Augmented Multi-Modal Question Answering},
    author    = {Bai, Yang and
                 Grant, Christan and
                 Wang, Daisy Zhe},
    editor    = {Chiruzzo, Luis and
                 Ritter, Alan and
                 Wang, Lu},
    booktitle = {Findings of the Association for Computational Linguistics: {NAACL} 2025},
    month     = apr,
    year      = {2025},
    address   = {Albuquerque, New Mexico},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.findings-naacl.60/},
    pages     = {1061--1076},
    isbn      = {979-8-89176-195-7},
    abstract  = {Multi-modal retrieval-augmented Question Answering (MRAQA), integrating text and images, has gained significant attention in information retrieval (IR) and natural language processing (NLP). Traditional ranking methods rely on small encoder-based language models, which are incompatible with modern decoder-based generative large language models (LLMs) that have advanced various NLP tasks. To bridge this gap, we propose RAMQA, a unified framework combining learning-to-rank methods with generative permutation-enhanced ranking techniques. We first train a pointwise multi-modal ranker using LLaVA as the backbone. Then, we apply instruction tuning to train a LLaMA model for re-ranking the top-k documents using an innovative autoregressive multi-task learning approach. Our generative ranking model generates re-ranked document IDs and specific answers from document candidates in various permutations. Experiments on two MRAQA benchmarks, WebQA and MultiModalQA, show significant improvements over strong baselines, highlighting the effectiveness of our approach. Data and code will be made public once the paper is accepted.},
}
Markdown (Informal)
[RAMQA: A Unified Framework for Retrieval-Augmented Multi-Modal Question Answering](https://aclanthology.org/2025.findings-naacl.60/) (Bai et al., Findings 2025)
ACL