@inproceedings{chang-etal-2025-chatbench,
title = "{C}hat{B}ench: From Static Benchmarks to Human-{AI} Evaluation",
author = "Chang, Serina and
Anderson, Ashton and
Hofman, Jake M.",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.1262/",
pages = "26009--26038",
ISBN = "979-8-89176-251-0",
abstract = "With the rapid adoption of LLM-based chat-bots, there is a pressing need to evaluate what humans and LLMs can achieve together. However, standard benchmarks, such as MMLU, measure LLM capabilities in isolation (i.e., ``AI-alone''). Here, we design and conduct a user study to convert MMLU questions into user-AI conversations, by seeding the user with the question and having them carry out a conversation with the LLM to answer their question. We release ChatBench, a new dataset with AI-alone, user-alone, and user-AI data for 396 questions and two LLMs, including 144K answers and 7,336 user-AI conversations. We find that AI-alone accuracy fails to predict user-AI accuracy, with significant differences across multiple subjects (math, physics, and moral reasoning), and we analyze the user-AI conversations to provide insight into how they diverge from AI-alone benchmarks. Finally, we show that fine-tuning a user simulator on a subset of ChatBench improves its ability to estimate user-AI accuracies, increasing correlation on held-out questions by more than 20 points, creating possibilities for scaling interactive evaluation."
}
Markdown (Informal)
[ChatBench: From Static Benchmarks to Human-AI Evaluation](https://aclanthology.org/2025.acl-long.1262/) (Chang et al., ACL 2025)
ACL
Serina Chang, Ashton Anderson, and Jake M. Hofman. 2025. ChatBench: From Static Benchmarks to Human-AI Evaluation. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 26009–26038, Vienna, Austria. Association for Computational Linguistics.