@inproceedings{si-etal-2022-examining,
title = "Re-Examining Calibration: The Case of Question Answering",
author = "Si, Chenglei and
Zhao, Chen and
Min, Sewon and
Boyd-Graber, Jordan",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2022.findings-emnlp.204/",
doi = "10.18653/v1/2022.findings-emnlp.204",
pages = "2814--2829",
abstract = "For users to trust model predictions, they need to understand model outputs, particularly their confidence {---} calibration aims to adjust (calibrate) models' confidence to match expected accuracy. We argue that the traditional calibration evaluation does not promote effective calibrations: for example, it can encourage always assigning a mediocre confidence score to all predictions, which does not help users distinguish correct predictions from wrong ones. Building on those observations, we propose a new calibration metric, MacroCE, that better captures whether the model assigns low confidence to wrong predictions and high confidence to correct predictions. Focusing on the practical application of open-domain question answering, we examine conventional calibration methods applied on the widely-used retriever-reader pipeline, all of which do not bring significant gains under our new MacroCE metric. Toward better calibration, we propose a new calibration method (ConsCal) that uses not just final model predictions but whether multiple model checkpoints make consistent predictions. Altogether, we provide an alternative view of calibration along with a new metric, re-evaluation of existing calibration methods on our metric, and proposal of a more effective calibration method."
}
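
To make the abstract's intuition concrete, here is a minimal sketch of a MacroCE-style metric: the per-instance calibration error |confidence - correctness| is averaged separately over correct and incorrect predictions, then macro-averaged, so a uniform "mediocre" confidence cannot score well. The function name, signature, and exact aggregation are illustrative assumptions based on the abstract's description, not the paper's reference implementation.

import numpy as np

def macro_ce(confidences, correct):
    """Sketch of a MacroCE-style metric (assumption: macro-average of
    per-instance calibration error over correct vs. incorrect groups).

    confidences: confidence scores in [0, 1]
    correct:     booleans, True where the prediction was right
    """
    confidences = np.asarray(confidences, dtype=float)
    correct = np.asarray(correct, dtype=bool)
    # Per-instance calibration error is |confidence - correctness|:
    # correct answers should get high confidence, wrong ones low.
    err_pos = 1.0 - confidences[correct]
    err_neg = confidences[~correct]
    # Macro-average the two groups (assumes both are non-empty).
    return 0.5 * (err_pos.mean() + err_neg.mean())

# Hedged "always 0.5" confidence earns MacroCE = 0.5, i.e. no credit for
# never committing, which illustrates the failure mode the abstract
# attributes to traditional calibration evaluation.
print(macro_ce([0.9, 0.8, 0.5, 0.4], [True, True, False, False]))
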