@inproceedings{zhao-etal-2025-challenges,
title = "Challenges in Trustworthy Human Evaluation of Chatbots",
author = "Zhao, Wenting and
Rush, Alexander M and
Goyal, Tanya",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.findings-naacl.186/",
pages = "3359--3365",
ISBN = "979-8-89176-195-7",
abstract = "Recently, open community-driven platforms like Chatbot Arena that collect user preference data from site visitors have gained reputation as trustworthy publicly available benchmarks for LLM performance. While gold standard, it is often tricky to implement the required guardrails to collect high-quality annotations from humans. In this paper, we demonstrate that different source of bad annotations, both malicious and otherwise, can corrupt the reliability of open leaderboard rankings. In particular, we show that only 10{\%} of poor quality votes by apathetic (site visitors not appropriately incentivized to give correct votes) or adversarial (bad actors seeking to inflate the ranking of a target model) annotators can change the rankings of models by up to 5 places on the leaderboard. Finally, we discuss open challenges in ensuring high quality human annotations."
}