@inproceedings{varshney-etal-2022-investigating,
  title     = {Investigating Selective Prediction Approaches Across Several Tasks in {IID}, {OOD}, and Adversarial Settings},
  author    = {Varshney, Neeraj and
               Mishra, Swaroop and
               Baral, Chitta},
  editor    = {Muresan, Smaranda and
               Nakov, Preslav and
               Villavicencio, Aline},
  booktitle = {Findings of the Association for Computational Linguistics: {ACL} 2022},
  month     = may,
  year      = {2022},
  address   = {Dublin, Ireland},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2022.findings-acl.158/},
  doi       = {10.18653/v1/2022.findings-acl.158},
  pages     = {1995--2002},
  abstract  = {In order to equip NLP systems with {\textquoteleft}selective prediction{\textquoteright} capability, several task-specific approaches have been proposed. However, which approaches work best across tasks or even if they consistently outperform the simplest baseline MaxProb remains to be explored. To this end, we systematically study selective prediction in a large-scale setup of 17 datasets across several NLP tasks. Through comprehensive experiments under in-domain (IID), out-of-domain (OOD), and adversarial (ADV) settings, we show that despite leveraging additional resources (held-out data/computation), none of the existing approaches consistently and considerably outperforms MaxProb in all three settings. Furthermore, their performance does not translate well across tasks. For instance, Monte-Carlo Dropout outperforms all other approaches on Duplicate Detection datasets but does not fare well on NLI datasets, especially in the OOD setting. Thus, we recommend that future selective prediction approaches should be evaluated across tasks and settings for reliable estimation of their capabilities.},
}
Markdown (Informal)
[Investigating Selective Prediction Approaches Across Several Tasks in IID, OOD, and Adversarial Settings](https://aclanthology.org/2022.findings-acl.158/) (Varshney et al., Findings 2022)
ACL