@inproceedings{wu-etal-2019-generating,
title = "Generating Question Relevant Captions to Aid Visual Question Answering",
author = "Wu, Jialin and
Hu, Zeyuan and
Mooney, Raymond",
editor = "Korhonen, Anna and
Traum, David and
M{\`a}rquez, Llu{\'i}s",
booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Ingest-2025-COMPUTEL/P19-1348/",
doi = "10.18653/v1/P19-1348",
pages = "3585--3594",
abstract = "Visual question answering (VQA) and image captioning require a shared body of general knowledge connecting language and vision. We present a novel approach to better VQA performance that exploits this connection by jointly generating captions that are targeted to help answer a specific visual question. The model is trained using an existing caption dataset by automatically determining question-relevant captions using an online gradient-based method. Experimental results on the VQA v2 challenge demonstrates that our approach obtains state-of-the-art VQA performance (e.g. 68.4{\%} in the Test-standard set using a single model) by simultaneously generating question-relevant captions."
}