@inproceedings{luo-etal-2023-unifying,
title = "Unifying Text, Tables, and Images for Multimodal Question Answering",
author = "Luo, Haohao and
Shen, Ying and
Deng, Yang",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.findings-emnlp.626/",
doi = "10.18653/v1/2023.findings-emnlp.626",
pages = "9355--9367",
abstract = "Multimodal question answering (MMQA), which aims to derive the answer from multiple knowledge modalities (e.g., text, tables, and images), has received increasing attention due to its board applications. Current approaches to MMQA often rely on single-modal or bi-modal QA models, which limits their ability to effectively integrate information across all modalities and leverage the power of pre-trained language models. To address these limitations, we propose a novel framework called UniMMQA, which unifies three different input modalities into a text-to-text format by employing position-enhanced table linearization and diversified image captioning techniques. Additionally, we enhance cross-modal reasoning by incorporating a multimodal rationale generator, which produces textual descriptions of cross-modal relations for adaptation into the text-to-text generation process. Experimental results on three MMQA benchmark datasets show the superiority of UniMMQA in both supervised and unsupervised settings."
}
Markdown (Informal)
[Unifying Text, Tables, and Images for Multimodal Question Answering](https://aclanthology.org/2023.findings-emnlp.626/) (Luo et al., Findings 2023)
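
The abstract above mentions unifying tables into a text-to-text format via position-enhanced table linearization. As an illustration only, the sketch below shows one plausible way to serialize a table with explicit row/column position tags; the exact serialization scheme used by UniMMQA is not reproduced in this record, so the tag format here is an assumption.

```python
# Illustrative sketch of position-enhanced table linearization (assumed tag
# format, not the authors' exact scheme): each cell is flattened into text
# together with its row index and column header so a text-to-text model can
# recover the table structure from a plain string.

def linearize_table(header, rows):
    """Flatten a table into a single string with explicit position tags."""
    parts = []
    for r, row in enumerate(rows, start=1):
        cells = [
            f"row {r} | col {c} ({header[c - 1]}): {cell}"
            for c, cell in enumerate(row, start=1)
        ]
        parts.append(" ; ".join(cells))
    return " || ".join(parts)


if __name__ == "__main__":
    header = ["Country", "Capital"]
    rows = [["France", "Paris"], ["Japan", "Tokyo"]]
    print(linearize_table(header, rows))
    # row 1 | col 1 (Country): France ; row 1 | col 2 (Capital): Paris || ...
```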