@inproceedings{mendonca-etal-2026-medal,
title = "{MEDAL}: A Framework for Benchmarking {LLM}s as Multilingual Open-Domain Dialogue Evaluators",
author = "Mendon{\c{c}}a, John and
Lavie, Alon and
Trancoso, Isabel",
editor = "Demberg, Vera and
Inui, Kentaro and
Marquez, Llu{\'i}s",
booktitle = "Findings of the {A}ssociation for {C}omputational {L}inguistics: {EACL} 2026",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-eacl/2026.findings-eacl.109/",
pages = "2069--2097",
ISBN = "979-8-89176-386-9",
abstract = "Evaluating the quality of open-domain chatbots has become increasingly reliant on LLMs acting as automatic judges. However, existing meta-evaluation benchmarks are static, outdated, and lacking in multilingual coverage, limiting their ability to fully capture subtle weaknesses in evaluation. We introduce MEDAL, an automated multi-agent framework for curating more representative and diverse open-domain dialogue evaluation benchmarks. Our approach leverages several LLMs to generate user-chatbot multilingual dialogues, conditioned on varied seed contexts. Then, a state-of-the-art LLM (GPT-4.1) is used for a multidimensional analysis of the performance of the chatbots, uncovering noticeable cross-lingual performance differences. Guided by this large-scale evaluation, we curate a new meta-evaluation multilingual benchmark and human-annotate samples with nuanced quality judgments. This benchmark is then used to assess the ability of several reasoning and non-reasoning LLMs to act as evaluators of open-domain dialogues. Using MEDAL, we uncover that state-of-the-art judges fail to reliably detect nuanced issues such as lack of empathy, common sense, or relevance."
}