@inproceedings{he-etal-2024-socreval,
    title = {{SocREval}: Large Language Models with the Socratic Method for Reference-free Reasoning Evaluation},
    author = {He, Hangfeng and
      Zhang, Hongming and
      Roth, Dan},
    editor = {Duh, Kevin and
      Gomez, Helena and
      Bethard, Steven},
    booktitle = {Findings of the Association for Computational Linguistics: NAACL 2024},
    month = jun,
    year = {2024},
    address = {Mexico City, Mexico},
    publisher = {Association for Computational Linguistics},
    url = {https://aclanthology.org/2024.findings-naacl.175/},
    doi = {10.18653/v1/2024.findings-naacl.175},
    pages = {2736--2764},
    abstract = {To comprehensively gauge the capacity of current models for complex reasoning, it is crucial to assess their step-by-step reasoning in a scalable manner. Established reference-based evaluation metrics rely on human-annotated reasoning chains as references to assess the model-derived chains. However, such {\textquotedblleft}gold-standard{\textquotedblright} human-written reasoning chains may not be unique and their acquisition is often labor-intensive. Existing reference-free reasoning evaluation metrics, while eliminating the need for human-crafted reasoning chains as references, often require fine-tuning with human-derived chains before evaluation, complicating the process and questioning their adaptability to other datasets. To address these challenges, we harness GPT-4 to automatically evaluate reasoning chain quality, thereby removing the dependency on human-written reasoning chains for both model fine-tuning and evaluative purposes. Leveraging the Socratic method, we develop SocREval (Socratic Method-Inspired Reasoning Evaluation), a novel approach for prompt design in reference-free reasoning evaluation. Empirical results from four human annotated datasets reveal that SocREval significantly improves GPT-4's performance, surpassing existing reference-free and reference-based reasoning evaluation metrics. Beyond its demonstrated efficacy, SocREval proves to be both cost-efficient and robust to prompt writing and example selection, as substantiated by our in-depth analysis.},
}
Markdown (Informal)
[SocREval: Large Language Models with the Socratic Method for Reference-free Reasoning Evaluation](https://aclanthology.org/2024.findings-naacl.175/) (He et al., Findings 2024)
ACL