@inproceedings{ramaswamy-etal-2025-model,
  title     = {Model Consistency as a Cheap yet Predictive Proxy for {LLM} Elo Scores},
  author    = {Ramaswamy, Ashwin and
               Demeure, Nestor and
               Rrapaj, Ermal},
  editor    = {Christodoulopoulos, Christos and
               Chakraborty, Tanmoy and
               Rose, Carolyn and
               Peng, Violet},
  booktitle = {Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing},
  month     = nov,
  year      = {2025},
  address   = {Suzhou, China},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.emnlp-main.1534/},
  doi       = {10.18653/v1/2025.emnlp-main.1534},
  pages     = {30155--30163},
  isbn      = {979-8-89176-332-6},
  abstract  = {New large language models (LLMs) are being released every day. Some perform significantly better or worse than expected given their parameter count. Therefore, there is a need for a method to independently evaluate models. The current best way to evaluate a model is to measure its Elo score by comparing it to other models in a series of contests{---}an expensive operation since humans are ideally required to compare LLM outputs. We observe that when an LLM is asked to judge such contests, the consistency with which it selects a model as the best in a matchup produces a metric that is 91{\%} correlated with its own human-produced Elo score. This provides a simple proxy for Elo scores that can be computed cheaply, without any human data or prior knowledge.},
}
@comment{
  Scraped citation blurb from the ACL Anthology page, kept for reference:
  Markdown (Informal)
  [Model Consistency as a Cheap yet Predictive Proxy for LLM Elo Scores](https://aclanthology.org/2025.emnlp-main.1534/) (Ramaswamy et al., EMNLP 2025)
  ACL
}