@inproceedings{muller-etal-2025-grouse,
title = "{G}ro{USE}: A Benchmark to Evaluate Evaluators in Grounded Question Answering",
author = "Muller, Sacha and
Loison, Antonio and
Omrani, Bilel and
Viaud, Gautier",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.coling-main.304/",
pages = "4510--4534",
abstract = "Retrieval-Augmented Generation (RAG) has emerged as a common paradigm to use Large Language Models (LLMs) alongside private and up-to-date knowledge bases. In this work, we address the challenges of using LLM-as-a-Judge when evaluating grounded answers generated by RAG systems. To assess the calibration and discrimination capabilities of judge models, we identify 7 generator failure modes and introduce GroUSE (Grounded QA Unitary Scoring of Evaluators), a meta-evaluation benchmark of 144 unit tests. This benchmark reveals that existing automated RAG evaluation frameworks often overlook important failure modes, even when using GPT-4 as a judge. To improve on the current design of automated RAG evaluation frameworks, we propose a novel pipeline and find that while closed models perform well on GroUSE, state-of-the-art open-source judges do not generalize to our proposed criteria, despite strong correlation with GPT-4{'}s judgement. Our findings suggest that correlation with GPT-4 is an incomplete proxy for the practical performance of judge models and should be supplemented with evaluations on unit tests for precise failure mode detection. We further show that finetuning Llama-3 on GPT-4{'}s reasoning traces significantly boosts its evaluation capabilities, improving upon both correlation with GPT-4{'}s evaluations and calibration on reference situations"
}
Markdown (Informal)
[GroUSE: A Benchmark to Evaluate Evaluators in Grounded Question Answering](https://aclanthology.org/2025.coling-main.304/) (Muller et al., COLING 2025)