@inproceedings{sandan-etal-2025-knockout,
title = "Knockout {LLM} Assessment: Using Large Language Models for Evaluations through Iterative Pairwise Comparisons",
author = "Sandan, Isik Baran and
Dinh, Tu Anh and
Niehues, Jan",
editor = "Arviv, Ofir and
Clinciu, Miruna and
Dhole, Kaustubh and
Dror, Rotem and
Gehrmann, Sebastian and
Habba, Eliya and
Itzhak, Itay and
Mille, Simon and
Perlitz, Yotam and
Santus, Enrico and
Sedoc, Jo{\~a}o and
Shmueli Scheuer, Michal and
Stanovsky, Gabriel and
Tafjord, Oyvind",
booktitle = "Proceedings of the Fourth Workshop on Generation, Evaluation and Metrics (GEM{\texttwosuperior})",
month = jul,
year = "2025",
address = "Vienna, Austria and virtual meeting",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/nschneid-patch-1/2025.gem-1.10/",
pages = "121--128",
ISBN = "979-8-89176-261-9",
abstract = "Large Language Models (LLMs) have shown to be effective evaluators across various domains such as machine translations or the scientific domain. Current LLM-as-a-Judge approaches rely mostly on individual assessments or a single round of pairwise assessments, preventing the judge LLM from developing a global ranking perspective.To address this, we present Knockout Assessment, an LLM-as-a-Judge method using a knockout tournament system with iterative pairwise comparisons. Experiments across three LLMs on two datasets show that knockout assessment improves scoring accuracy, increasing Pearson correlation with expert evaluations by 0.07 on average for university-level exam scoring and machine translation evaluations, aligning LLM assessments more closely with human scoring."
}
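The abstract describes a knockout tournament over candidate answers, decided by iterative pairwise LLM judgments. As a rough illustration only (not the authors' released code), here is a minimal Python sketch of a single-elimination bracket driven by a generic `judge(a, b)` comparator; the `judge` callback, the random bracket seeding, and the round-of-elimination ranking are all assumptions made for this sketch, with the actual LLM call stubbed out.

```python
import random
from typing import Callable, List, Tuple

def knockout_rank(
    candidates: List[str],
    judge: Callable[[str, str], str],
) -> List[Tuple[str, int]]:
    """Rank candidates via a single-elimination (knockout) tournament.

    `judge(a, b)` must return the preferred of the two candidates; in the
    paper's setting this would be a pairwise LLM-as-a-Judge call. Here,
    items are ranked by the round in which they are eliminated (later =
    better), which is one simple way to turn a bracket into a ranking.
    """
    pool = candidates[:]
    random.shuffle(pool)       # random initial bracket (an assumption)
    eliminated_in = {}         # candidate -> round in which it lost
    rnd = 0
    while len(pool) > 1:
        rnd += 1
        winners = []
        # If the pool is odd-sized, the last candidate gets a bye.
        if len(pool) % 2 == 1:
            winners.append(pool.pop())
        for a, b in zip(pool[0::2], pool[1::2]):
            winner = judge(a, b)
            loser = b if winner == a else a
            eliminated_in[loser] = rnd
            winners.append(winner)
        pool = winners
    eliminated_in[pool[0]] = rnd + 1   # champion survives every round
    return sorted(eliminated_in.items(), key=lambda kv: -kv[1])

# Toy usage: a stand-in "judge" that prefers the longer answer,
# in place of a real pairwise LLM judgment.
answers = ["A", "BBB", "CC", "DDDD", "EEEEE"]
print(knockout_rank(answers, judge=lambda a, b: max(a, b, key=len)))
```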