@inproceedings{hardy-2025-glitters,
title = "``All that Glitters'': Techniques for Evaluations with Unreliable Model and Human Annotations",
author = "Hardy, Michael",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.findings-naacl.120/",
pages = "2250--2278",
ISBN = "979-8-89176-195-7",
abstract = "``Gold'' and ``ground truth'' human-mediated labels have error. This error can escape commonly reported metrics of label quality or obscure questions of accuracy, bias, fairness, and usefulness during model evaluation. This study demonstrates methods for answering such questions even in the context of very low reliabilities from expert humans. We analyze human labels, GPT model ratings, and transformer encoder model ratings of the quality of classroom teaching from two LLM architecture families{--}encoders and GPT decoders. First, we demonstrate that using standard metrics in the presence of poor labels can mask both label and model quality. The encoder family of models achieve state-of-the-art, even ``super-human'', results across all classroom annotation tasks using standard metrics. However, evaluation techniques accounting for unreliable labels reveal important flaws, including spurious correlations and nonrandom racial biases across models and humans. We estimate that if models were used in a human-in-the-loop context, the variance contributed by GPT model labels would worsen ratings. These techniques also highlight tasks where encoders could offer 80{\%} reduction in human costs while also reducing bias."
}