@inproceedings{lee-etal-2025-evaluating,
title = "Evaluating the Consistency of {LLM} Evaluators",
author = "Lee, Noah and
Hong, Jiwoo and
Thorne, James",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2025.coling-main.710/",
pages = "10650--10659",
abstract = "Large language models (LLMs) have shown potential as general evaluators along with the evident benefits of speed and cost. While their correlation against human annotators has been widely studied, consistency as evaluators is still understudied, raising concerns about the reliability of LLM evaluators. In this paper, we conduct extensive studies on the two aspects of consistency in LLM evaluations, Self-Consistency (SC) and Inter-scale Consistency (IC), on different scoring scales and criterion granularity with open-source and proprietary models. Our comprehensive analysis demonstrates that strong proprietary models are not necessarily consistent evaluators, highlighting the importance of considering consistency in assessing the capability of LLM evaluators."
}
Markdown (Informal)
[Evaluating the Consistency of LLM Evaluators](https://aclanthology.org/2025.coling-main.710/) (Lee et al., COLING 2025)

ACL
Noah Lee, Jiwoo Hong, and James Thorne. 2025. Evaluating the Consistency of LLM Evaluators. In Proceedings of the 31st International Conference on Computational Linguistics, pages 10650–10659, Abu Dhabi, UAE. Association for Computational Linguistics.