@inproceedings{xie-etal-2025-outcomes,
title = "From Outcomes to Processes: Guiding {PRM} Learning from {ORM} for Inference-Time Alignment",
author = "Xie, Bin and
Xu, Bingbing and
Yuan, Yige and
Zhu, Shengmao and
Shen, Huawei",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/transition-to-people-yaml/2025.acl-long.946/",
doi = "10.18653/v1/2025.acl-long.946",
pages = "19291--19307",
ISBN = "979-8-89176-251-0",
abstract = "Inference-time alignment methods have gained significant attention for their efficiency and effectiveness in aligning large language models (LLMs) with human preferences. However, existing dominant approaches using reward-guided search (RGS) primarily rely on outcome reward models (ORMs), which suffer from a critical granularity mismatch: ORMs are designed to provide outcome rewards for complete responses, while RGS methods rely on process rewards to guide the policy, leading to inconsistent scoring and suboptimal alignment. To address this challenge, we introduce process reward models (PRMs) into RGS and argue that an ideal PRM should satisfy two objectives: Score Consistency, ensuring coherent evaluation across partial and complete responses, and Preference Consistency, aligning partial sequence assessments with human preferences. Based on these, we propose \textbf{SP-PRM}, a novel dual-consistency framework integrating score consistency-based and preference consistency-based partial evaluation modules without relying on human annotation. Extensive experiments on dialogue, summarization, and reasoning tasks demonstrate that SP-PRM substantially enhances existing RGS methods, achieving a 3.6{\%}-10.3{\%} improvement in GPT-4 evaluation scores across all tasks. Code is publicly available at \url{https://github.com/xiebin23/SP-PRM}."
}