@inproceedings{muttenthaler-etal-2020-unsupervised,
title = "Unsupervised Evaluation for Question Answering with Transformers",
author = "Muttenthaler, Lukas and
Augenstein, Isabelle and
Bjerva, Johannes",
editor = "Alishahi, Afra and
Belinkov, Yonatan and
Chrupa{\l}a, Grzegorz and
Hupkes, Dieuwke and
Pinter, Yuval and
Sajjad, Hassan",
booktitle = "Proceedings of the Third BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2020.blackboxnlp-1.8/",
doi = "10.18653/v1/2020.blackboxnlp-1.8",
pages = "83--90",
abstract = "It is challenging to automatically evaluate the answer of a QA model at inference time. Although many models provide confidence scores, and simple heuristics can go a long way towards indicating answer correctness, such measures are heavily dataset-dependent and are unlikely to generalise. In this work, we begin by investigating the hidden representations of questions, answers, and contexts in transformer-based QA architectures. We observe a consistent pattern in the answer representations, which we show can be used to automatically evaluate whether or not a predicted answer span is correct. Our method does not require any labelled data and outperforms strong heuristic baselines, across 2 datasets and 7 domains. We are able to predict whether or not a model`s answer is correct with 91.37{\%} accuracy on SQuAD, and 80.7{\%} accuracy on SubjQA. We expect that this method will have broad applications, e.g., in semi-automatic development of QA datasets."
}
Markdown (Informal)
[Unsupervised Evaluation for Question Answering with Transformers](https://aclanthology.org/2020.blackboxnlp-1.8/) (Muttenthaler et al., BlackboxNLP 2020)
ACL
Lukas Muttenthaler, Isabelle Augenstein, and Johannes Bjerva. 2020. [Unsupervised Evaluation for Question Answering with Transformers](https://aclanthology.org/2020.blackboxnlp-1.8/). In *Proceedings of the Third BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP*, pages 83–90, Online. Association for Computational Linguistics.
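
As a rough illustration of the kind of analysis the abstract describes, the sketch below extracts the hidden representation of a predicted answer span from an off-the-shelf extractive QA model using Hugging Face `transformers`. The checkpoint name, the mean-pooling, and the cosine-similarity score are assumptions made here for illustration only; this is not a reproduction of the paper's unsupervised evaluation method.

```python
# Minimal sketch: inspect answer-span hidden representations of a QA model.
# Checkpoint, pooling, and the similarity "score" are illustrative assumptions.
import torch
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

model_name = "distilbert-base-cased-distilled-squad"  # assumed example checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)

question = "Who wrote the paper?"
context = "The paper was written by Muttenthaler, Augenstein, and Bjerva in 2020."

inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)

# Predicted answer span from the start/end logits.
start = int(outputs.start_logits.argmax())
end = int(outputs.end_logits.argmax())
answer = tokenizer.decode(inputs["input_ids"][0, start:end + 1])

# Last-layer hidden states; mean-pool the answer-span tokens and the full
# sequence to obtain two fixed-size vectors (pooling choice is an assumption).
hidden = outputs.hidden_states[-1][0]            # (seq_len, hidden_dim)
answer_vec = hidden[start:end + 1].mean(dim=0)
sequence_vec = hidden.mean(dim=0)

# One simple unsupervised signal to inspect: similarity between the answer-span
# representation and the overall sequence representation.
score = torch.nn.functional.cosine_similarity(answer_vec, sequence_vec, dim=0)
print(f"answer: {answer!r}  similarity: {score.item():.3f}")
```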