@inproceedings{yang-etal-2024-decompose,
  title     = {Decompose and Compare Consistency: Measuring {VLM}s' Answer Reliability via Task-Decomposition Consistency Comparison},
  author    = {Yang, Qian and
               Yan, Weixiang and
               Agrawal, Aishwarya},
  editor    = {Al-Onaizan, Yaser and
               Bansal, Mohit and
               Chen, Yun-Nung},
  booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing},
  month     = nov,
  year      = {2024},
  address   = {Miami, Florida, USA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.emnlp-main.211/},
  doi       = {10.18653/v1/2024.emnlp-main.211},
  pages     = {3613--3627},
  abstract  = {Despite tremendous advancements, current state-of-the-art Vision-Language Models (VLMs) are still far from perfect. They tend to hallucinate and may generate biased responses. In such circumstances, having a way to assess the reliability of a given response generated by a VLM is quite useful. Existing methods, such as estimating uncertainty using answer likelihoods or prompt-based confidence generation, often suffer from overconfidence. Other methods use self-consistency comparison but are affected by confirmation biases. To alleviate these, we propose Decompose and Compare Consistency (DeCC) for reliability measurement. By comparing the consistency between the direct answer generated using the VLM{'}s internal reasoning process, and the indirect answers obtained by decomposing the question into sub-questions and reasoning over the sub-answers produced by the VLM, DeCC measures the reliability of VLM{'}s direct answer. Experiments across six vision-language tasks with three VLMs show DeCC{'}s reliability estimation achieves better correlation with task accuracy compared to the existing methods.},
}
Markdown (Informal)
[Decompose and Compare Consistency: Measuring VLMs’ Answer Reliability via Task-Decomposition Consistency Comparison](https://aclanthology.org/2024.emnlp-main.211/) (Yang et al., EMNLP 2024)
ACL