@inproceedings{li-etal-2024-diversify,
title = "Diversify, Rationalize, and Combine: Ensembling Multiple {QA} Strategies for Zero-shot Knowledge-based {VQA}",
author = "Li, Miaoyu and
Li, Haoxin and
Du, Zilin and
Li, Boyang",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.findings-emnlp.84/",
doi = "10.18653/v1/2024.findings-emnlp.84",
pages = "1552--1566",
abstract = "Knowledge-based Visual Qustion-answering (K-VQA) often requires the use of background knowledge beyond the image. However, we discover that a single knowledge generation strategy is often insuffcient for all K-VQA questions. To this end, we propose Diversifcation, Evidence Truncation, and Combination for Knowledge-based Elucidation (DietCoke), which utilizes a bundle of complementary question-answering tactics and aggregates their answers using textual rationales. DietCoke comprises of three stages: diversifcation, rationalization, and ensemble. The diversification stage generates three distinctive decision contexts, each leading to its own answer candidate. The rationalization stage generates two rationales, the automatic rationale and the mechanistic rationale, for each answer candidate using decorrelated techniques. Finally, in the ensemble stage, an LLM informed by the rationales selects one answer from the three candidates. Experiments show that DietCoke significantly outperforms state-of-the-art LLM-based baselines by 2.8{\%} on OK-VOA and 4.7{\%} on A-OKVOA and that the strategies in the ensembles are highly complementary."
}