@inproceedings{elle-2025-reward,
title = "Reward Model Perspectives: Whose Opinions Do Reward Models Reward?",
author = "Elle",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.emnlp-main.754/",
doi = "10.18653/v1/2025.emnlp-main.754",
pages = "14931--14955",
ISBN = "979-8-89176-332-6",
abstract = "Reward models (RMs) are central to the alignment of language models (LMs). An RM often serves as a proxy for human preferences to guide downstream LM behavior. However, our understanding of RM behavior is limited. Our work (i) formalizes a framework for measuring the alignment of opinions captured by RMs, (ii) investigates the extent to which RMs demonstrate sociodemographic biases, and (iii) explores the effects of prompting to steer rewards towards the preferences of a target group. We study the subjective and diverse perspectives on controversial topics, which allows us to quantify $\textit{RM perspectives}$ in terms of their opinions, attitudes, and values. We show that RMs are poorly aligned with several demographic groups and can systematically reward harmful stereotypes, and steering alone is not enough to overcome these limitations. Our findings underscore the need for more careful consideration of RM behavior in model alignment during preference learning to prevent the propagation of unwanted social biases in the language technologies that we use."
}