@inproceedings{dsouza-etal-2025-yescieval,
  title     = {{YESciEval}: Robust {LLM}-as-a-Judge for Scientific Question Answering},
  author    = {D{'}Souza, Jennifer and
               Babaei Giglou, Hamed and
               M{\"u}nch, Quentin},
  editor    = {Che, Wanxiang and
               Nabende, Joyce and
               Shutova, Ekaterina and
               Pilehvar, Mohammad Taher},
  booktitle = {Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.acl-long.675/},
  pages     = {13749--13783},
  isbn      = {979-8-89176-251-0},
  abstract  = {Large Language Models (LLMs) drive scientific question-answering on modern search engines, yet their evaluation robustness remains underexplored. We introduce YESciEval, an open-source framework that combines fine-grained rubric-based assessment with reinforcement learning to mitigate optimism bias in LLM evaluators. We release multidisciplinary science Q{\&}A datasets, including adversarial variants, with evaluation scores from multiple LLMs. Independent of proprietary models and human feedback, our approach enables scalable, cost-free evaluation. By advancing reliable LLM-as-a-judge models, this work supports AI alignment and fosters robust, transparent evaluation essential for scientific inquiry.},
}
@comment{Markdown (Informal):
[YESciEval: Robust LLM-as-a-Judge for Scientific Question Answering](https://aclanthology.org/2025.acl-long.675/) (D'Souza et al., ACL 2025)
ACL}