@inproceedings{liu-etal-2025-evaluating,
title = "Evaluating and Aligning Human Economic Risk Preferences in {LLM}s",
author = "Liu, Jiaxin and
Tang, Yixuan and
Yang, Yi and
Tam, Kar Yan",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.917/",
pages = "18185--18199",
ISBN = "979-8-89176-332-6",
abstract = "Large Language Models (LLMs) are increasingly used in decision-making scenarios that involve risk assessment, yet their alignment with human economic rationality remains unclear. In this study, we investigate whether LLMs exhibit risk preferences consistent with human expectations across different personas. Specifically, we propose an evaluation metric called Risk Disparity Score (RDS) and assess whether LLM-generated responses reflect appropriate levels of risk aversion or risk-seeking behavior based on individual{'}s persona. Our results reveal that while LLMs make reasonable decisions in simplified, personalized risk contexts, their performance declines in more complex economic decision-making tasks. To address this, we test whether current state-of-art alignment methods such as Direct Preference Optimization(DPO) and In Context Learning(ICL) can enhance LLM adherence to persona-specific risk preferences. We find DPO can improve the economic rationality of LLMs in loss-related parameters, offering a step toward more human-aligned AI decision-making."
}