@inproceedings{romano-etal-2025-evaluating,
title = "Evaluating the Role of Verifiers in Test-Time Scaling for Legal Reasoning Tasks",
author = "Romano, Davide and
Schwarz, Jonathan Richard and
Giofr{\`e}, Daniele",
editor = "Aletras, Nikolaos and
Chalkidis, Ilias and
Barrett, Leslie and
Goanț{\u{a}}, C{\u{a}}t{\u{a}}lina and
Preoțiuc-Pietro, Daniel and
Spanakis, Gerasimos",
booktitle = "Proceedings of the Natural Legal Language Processing Workshop 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.nllp-1.15/",
pages = "207--225",
ISBN = "979-8-89176-338-8",
abstract = "Test-time scaling (TTS) techniques can improve the performance of large language models (LLMs) at the expense of additional computation and latency. While TTS has proven effective in formal domains such as mathematics and programming (Snell et al., 2024; Chen et al., 2024), its value in argumentative domains such as law remains underexplored. We present an empirical study of verifier-based TTS methods for legal multiple-choice QA (MCQA) across five benchmarks. Using a family of 7 reward models, we evaluate both outcome-level (Best-of-$N$) and process-level (tree search) verification under realistic low-$N$ budgets. Our analysis systematically investigates how verifier utility is affected by key properties such as domain specialization, model size, and supervision type (process-supervised PRMs vs. outcome-only ORMs), even when applied across different roles."
}