@inproceedings{ding-etal-2025-lvlms,
title = "Do {LVLM}s Know What They Know? A Systematic Study of Knowledge Boundary Perception in {LVLM}s",
author = "Ding, Zhikai and
Ni, Shiyu and
Bi, Keping",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.1081/",
doi = "10.18653/v1/2025.findings-emnlp.1081",
pages = "19832--19848",
ISBN = "979-8-89176-335-7",
abstract = "Large Vision-Language Models (LVLMs) demonstrate strong visual question answering (VQA) capabilities but are shown to hallucinate. A reliable model should perceive its knowledge boundaries{---}knowing what it knows and what it does not. This paper investigates LVLMs' perception of their knowledge boundaries by evaluating three types of confidence signals: probabilistic confidence, answer consistency-based confidence, and verbalized confidence. Experiments on three LVLMs across three VQA datasets show that, although LVLMs possess a reasonable perception level, there is substantial room for improvement. Among the three confidence, probabilistic and consistency-based signals are more reliable indicators, while verbalized confidence often leads to overconfidence. To enhance LVLMs' perception, we adapt several established confidence calibration methods from Large Language Models (LLMs) and propose three effective methods. Additionally, we compare LVLMs with their LLM counterparts, finding that jointly processing visual and textual inputs decreases question-answering performance but reduces confidence, resulting in improved perception level compared to LLMs."
}

Markdown (Informal)
[Do LVLMs Know What They Know? A Systematic Study of Knowledge Boundary Perception in LVLMs](https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.1081/) (Ding et al., Findings 2025)