@article{wang-etal-2022-uncertainty,
title = "Uncertainty Estimation and Reduction of Pre-trained Models for Text Regression",
author = "Wang, Yuxia and
Beck, Daniel and
Baldwin, Timothy and
Verspoor, Karin",
editor = "Roark, Brian and
Nenkova, Ani",
journal = "Transactions of the Association for Computational Linguistics",
volume = "10",
year = "2022",
address = "Cambridge, MA",
publisher = "MIT Press",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.tacl-1.39/",
doi = "10.1162/tacl_a_00483",
pages = "680--696",
abstract = "State-of-the-art classification and regression models are often not well calibrated, and cannot reliably provide uncertainty estimates, limiting their utility in safety-critical applications such as clinical decision-making. While recent work has focused on calibration of classifiers, there is almost no work in NLP on calibration in a regression setting. In this paper, we quantify the calibration of pre- trained language models for text regression, both intrinsically and extrinsically. We further apply uncertainty estimates to augment training data in low-resource domains. Our experiments on three regression tasks in both self-training and active-learning settings show that uncertainty estimation can be used to increase overall performance and enhance model generalization."
}
Markdown (Informal)
[Uncertainty Estimation and Reduction of Pre-trained Models for Text Regression](https://aclanthology.org/2022.tacl-1.39/) (Wang et al., TACL 2022)