@inproceedings{lu-etal-2024-ahp,
title = "{AHP}-Powered {LLM} Reasoning for Multi-Criteria Evaluation of Open-Ended Responses",
author = "Lu, Xiaotian and
Li, Jiyi and
Takeuchi, Koh and
Kashima, Hisashi",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.findings-emnlp.101/",
doi = "10.18653/v1/2024.findings-emnlp.101",
pages = "1847--1856",
abstract = "Question answering (QA) tasks have been extensively studied in the field of natural language processing (NLP). Answers to open-ended questions are highly diverse and difficult to quantify, and cannot be simply evaluated as correct or incorrect, unlike close-ended questions with definitive answers. While large language models (LLMs) have demonstrated strong capabilities across various tasks, they exhibit relatively weaker performance in evaluating answers to open-ended questions. In this study, we propose a method that leverages LLMs and the analytic hierarchy process (AHP) to assess answers to open-ended questions. We utilized LLMs to generate multiple evaluation criteria for a question. Subsequently, answers were subjected to pairwise comparisons under each criterion with LLMs, and scores for each answer were calculated in the AHP. We conducted experiments on four datasets using both ChatGPT-3.5-turbo and GPT-4. Our results indicate that our approach more closely aligns with human judgment compared to the four baselines. Additionally, we explored the impact of the number of criteria, variations in models, and differences in datasets on the results."
}
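
The abstract describes scoring answers by running LLM pairwise comparisons under each generated criterion and then aggregating the results with the analytic hierarchy process (AHP). Below is a minimal illustrative sketch of that aggregation step, not the authors' released code: it assumes the row geometric-mean prioritization method, equal criterion weights, and a toy 3-answer comparison matrix purely for demonstration.

```python
# Hedged sketch of AHP-style aggregation of LLM pairwise comparisons.
# Assumptions (not from the paper): geometric-mean prioritization,
# equal criterion weights, and made-up comparison values.
import numpy as np

def ahp_priorities(pairwise: np.ndarray) -> np.ndarray:
    """Priority vector from a reciprocal pairwise-comparison matrix,
    using the row geometric-mean approximation to the principal eigenvector."""
    geo_means = np.prod(pairwise, axis=1) ** (1.0 / pairwise.shape[1])
    return geo_means / geo_means.sum()

def aggregate_scores(matrices: list[np.ndarray], criterion_weights: np.ndarray) -> np.ndarray:
    """Weighted sum of per-criterion priority vectors -> final answer scores."""
    per_criterion = np.stack([ahp_priorities(m) for m in matrices])  # shape (C, N)
    return criterion_weights @ per_criterion

# Toy example: 3 answers compared under 2 LLM-generated criteria.
# Entry (i, j) > 1 means answer i was judged better than answer j under that criterion.
crit1 = np.array([[1.0, 3.0, 5.0],
                  [1/3, 1.0, 2.0],
                  [1/5, 1/2, 1.0]])
crit2 = np.array([[1.0, 1/2, 2.0],
                  [2.0, 1.0, 3.0],
                  [1/2, 1/3, 1.0]])
weights = np.array([0.5, 0.5])  # assumed equal criterion weights

print(aggregate_scores([crit1, crit2], weights))  # per-answer scores, summing to 1
```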