@inproceedings{vasselli-etal-2025-measuring,
title = "Measuring the Robustness of Reference-Free Dialogue Evaluation Systems",
author = "Vasselli, Justin and
Nohejl, Adam and
Watanabe, Taro",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.coling-main.331/",
pages = "4958--4972",
abstract = "Advancements in dialogue systems powered by large language models (LLMs) have outpaced the development of reliable evaluation metrics, particularly for diverse and creative responses. We present a benchmark for evaluating the robustness of reference-free dialogue metrics against four categories of adversarial attacks: speaker tag prefixes, static responses, ungrammatical responses, and repeated conversational context. We analyze metrics such as DialogRPT, UniEval, and PromptEval{---}a prompt-based method leveraging LLMs{---}across grounded and ungrounded datasets. By examining both their correlation with human judgment and susceptibility to adversarial attacks, we find that these two axes are not always aligned; metrics that appear to be equivalent when judged by traditional benchmarks may, in fact, vary in their scores of adversarial responses. These findings motivate the development of nuanced evaluation frameworks to address real-world dialogue challenges."
}
Markdown (Informal)
[Measuring the Robustness of Reference-Free Dialogue Evaluation Systems](https://preview.aclanthology.org/fix-sig-urls/2025.coling-main.331/) (Vasselli et al., COLING 2025)
ACL
Justin Vasselli, Adam Nohejl, and Taro Watanabe. 2025. [Measuring the Robustness of Reference-Free Dialogue Evaluation Systems](https://preview.aclanthology.org/fix-sig-urls/2025.coling-main.331/). In *Proceedings of the 31st International Conference on Computational Linguistics*, pages 4958–4972, Abu Dhabi, UAE. Association for Computational Linguistics.
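
Below is a minimal, illustrative sketch (not code from the paper) of how the four adversarial response categories named in the abstract might be constructed when probing a reference-free dialogue metric. The function `make_adversarial_responses`, the static reply text, and the example dialogue are all hypothetical; a metric such as DialogRPT, UniEval, or a prompt-based scorer would be applied where the comments indicate.

```python
# Illustrative sketch only: building the four kinds of adversarial responses
# described in the abstract, to check whether a reference-free dialogue metric
# scores them lower than a genuine response. The metric itself is not included.
import random


def make_adversarial_responses(context: list[str], response: str) -> dict[str, str]:
    """Return one perturbed response per adversarial attack category."""
    words = response.split()
    return {
        # 1. Prepend a speaker tag to an otherwise normal response.
        "speaker_tag_prefix": f"Speaker 2: {response}",
        # 2. Replace the response with a generic, context-independent reply.
        "static_response": "That sounds great, tell me more!",
        # 3. Make the response ungrammatical, here by shuffling word order.
        "ungrammatical": " ".join(random.sample(words, len(words))),
        # 4. Echo part of the conversational context back verbatim.
        "repeated_context": context[-1],
    }


if __name__ == "__main__":
    context = [
        "How was your trip to Kyoto?",
        "It was wonderful, we saw so many temples.",
    ]
    response = "I'm glad you enjoyed it. Which temple was your favorite?"
    for attack, adversarial in make_adversarial_responses(context, response).items():
        # A robust metric should assign these perturbed responses lower scores
        # than the original; plug in the metric of your choice to compare.
        print(f"{attack}: {adversarial}")
```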