@inproceedings{kirstein-etal-2025-meeting,
title = "Is my Meeting Summary Good? Estimating Quality with a Multi-{LLM} Evaluator",
author = "Kirstein, Frederic Thomas and
Lima Ruas, Terry and
Gipp, Bela",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven and
Darwish, Kareem and
Agarwal, Apoorv",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics: Industry Track",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2025.coling-industry.48/",
pages = "561--574",
abstract = "The quality of meeting summaries generated by natural language generation (NLG) systems is hard to measure automatically. Established metrics such as ROUGE and BERTScore have a relatively low correlation with human judgments and fail to capture nuanced errors. Recent studies suggest using large language models (LLMs), which have the benefit of better context understanding and adaption of error definitions without training on a large number of human preference judgments. However, current LLM-based evaluators risk masking errors and can only serve as a weak proxy, leaving human evaluation the gold standard despite being costly and hard to compare across studies. In this work, we present MESA, an LLM-based framework employing a three-step assessment of individual error types, multi-agent discussion for decision refinement, and feedback-based self-training to refine error definition understanding and alignment with human judgment. We show that MESA`s components enable thorough error detection, consistent rating, and adaptability to custom error guidelines. Using GPT-4o as its backbone, MESA achieves mid to high Point-Biserial correlation with human judgment in error detection and mid Spearman and Kendall correlation in reflecting error impact on summary quality, on average 0.25 higher than previous methods. The framework`s flexibility in adapting to custom error guidelines makes it suitable for various tasks with limited human-labeled data."
}
Markdown (Informal)
[Is my Meeting Summary Good? Estimating Quality with a Multi-LLM Evaluator](https://aclanthology.org/2025.coling-industry.48/) (Kirstein et al., COLING 2025)
ACL
Frederic Thomas Kirstein, Terry Lima Ruas, and Bela Gipp. 2025. Is my Meeting Summary Good? Estimating Quality with a Multi-LLM Evaluator. In *Proceedings of the 31st International Conference on Computational Linguistics: Industry Track*, pages 561–574, Abu Dhabi, UAE. Association for Computational Linguistics.
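For reference, the three correlation measures named in the abstract can all be computed with `scipy.stats`: Point-Biserial for agreement between binary human error labels and the evaluator's scores, and Spearman/Kendall rank correlations for agreement on ordinal summary-quality ratings. Below is a minimal sketch; the variable names and toy scores are hypothetical illustrations, not data from the paper:

```python
# Minimal sketch (toy data) of the correlation measures reported in the
# abstract. `human_*` stands in for human annotations, `mesa_*` for the
# evaluator's outputs; both arrays here are made up for illustration.
from scipy.stats import pointbiserialr, spearmanr, kendalltau

# Error detection: binary human labels (error present / absent) vs. the
# evaluator's continuous scores -> Point-Biserial correlation.
human_has_error = [1, 0, 1, 1, 0, 0, 1, 0]
mesa_error_score = [0.9, 0.2, 0.7, 0.8, 0.1, 0.3, 0.6, 0.2]
r_pb, _ = pointbiserialr(human_has_error, mesa_error_score)

# Error impact on summary quality: ordinal ratings from both sides ->
# Spearman and Kendall rank correlations.
human_quality = [4, 2, 3, 5, 1, 2, 4, 3]
mesa_quality = [5, 2, 3, 4, 1, 3, 4, 2]
rho, _ = spearmanr(human_quality, mesa_quality)
tau, _ = kendalltau(human_quality, mesa_quality)

print(f"point-biserial={r_pb:.2f}  spearman={rho:.2f}  kendall={tau:.2f}")
```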