@inproceedings{groot-valdenegro-toro-2024-overconfidence,
title = "Overconfidence is Key: Verbalized Uncertainty Evaluation in Large Language and Vision-Language Models",
author = "Groot, Tobias and
Valdenegro - Toro, Matias",
editor = "Ovalle, Anaelia and
Chang, Kai-Wei and
Cao, Yang Trista and
Mehrabi, Ninareh and
Zhao, Jieyu and
Galstyan, Aram and
Dhamala, Jwala and
Kumar, Anoop and
Gupta, Rahul",
booktitle = "Proceedings of the 4th Workshop on Trustworthy Natural Language Processing (TrustNLP 2024)",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.trustnlp-1.13/",
doi = "10.18653/v1/2024.trustnlp-1.13",
pages = "145--171",
abstract = "Language and Vision-Language Models (LLMs/VLMs) have revolutionized the field of AI by their ability to generate human-like text and understand images, but ensuring their reliability is crucial. This paper aims to evaluate the ability of LLMs (GPT4, GPT-3.5, LLaMA2, and PaLM 2) and VLMs (GPT4V and Gemini Pro Vision) to estimate their verbalized uncertainty via prompting. We propose the new Japanese Uncertain Scenes (JUS) dataset, aimed at testing VLM capabilities via difficult queries and object counting, and the Net Calibration Error (NCE) to measure direction of miscalibration.Results show that both LLMs and VLMs have a high calibration error and are overconfident most of the time, indicating a poor capability for uncertainty estimation. Additionally we develop prompts for regression tasks, and we show that VLMs have poor calibration when producing mean/standard deviation and 95{\%} confidence intervals."
}
Markdown (Informal)
[Overconfidence is Key: Verbalized Uncertainty Evaluation in Large Language and Vision-Language Models](https://preview.aclanthology.org/fix-sig-urls/2024.trustnlp-1.13/) (Groot & Valdenegro-Toro, TrustNLP 2024)
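The abstract references the Net Calibration Error (NCE) for measuring the direction of miscalibration but does not define it here. Below is a minimal sketch, assuming NCE is the signed analogue of the Expected Calibration Error (bin-weighted confidence minus accuracy, so positive values indicate overconfidence); the function name, binning scheme, and sign convention are illustrative assumptions, not the paper's exact formulation.

```python
import numpy as np

def net_calibration_error(confidences, correct, n_bins=10):
    """Signed, bin-weighted gap between verbalized confidence and accuracy.

    Hypothetical sketch: assumes NCE drops the absolute value from ECE,
    so positive output suggests overconfidence, negative underconfidence.
    """
    confidences = np.asarray(confidences, dtype=float)
    correct = np.asarray(correct, dtype=float)
    edges = np.linspace(0.0, 1.0, n_bins + 1)
    nce = 0.0
    for lo, hi in zip(edges[:-1], edges[1:]):
        # Include the right edge in the last bin so confidence 1.0 is counted.
        mask = (confidences >= lo) & ((confidences < hi) | (hi == 1.0))
        if mask.any():
            weight = mask.mean()  # fraction of samples in this bin
            nce += weight * (confidences[mask].mean() - correct[mask].mean())
    return nce

# Example: verbalized confidences vs. answer correctness (1 = correct).
print(net_calibration_error([0.9, 0.8, 0.95, 0.6], [1, 0, 0, 1]))
```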