@inproceedings{roberts-etal-2025-large,
title = "Do Large Language Models Learn Human-Like Strategic Preferences?",
author = "Roberts, Jesse and
Moore, Kyle and
Fisher, Douglas",
editor = "Kamalloo, Ehsan and
Gontier, Nicolas and
Lu, Xing Han and
Dziri, Nouha and
Murty, Shikhar and
Lacoste, Alexandre",
booktitle = "Proceedings of the 1st Workshop for Research on Agent Language Models (REALM 2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/corrections-2025-08/2025.realm-1.8/",
doi = "10.18653/v1/2025.realm-1.8",
pages = "97--108",
ISBN = "979-8-89176-264-0",
abstract = "In this paper, we evaluate whether LLMs learn to make human-like preference judgements in strategic scenarios as compared with known empirical results. Solar and Mistral are shown to exhibit stable value-based preference consistent with humans and exhibit human-like preference for cooperation in the prisoner{'}s dilemma (including stake-size effect) and traveler{'}s dilemma (including penalty-size effect). We establish a relationship between model size, value-based preference, and superficiality. Finally, results here show that models tending to be less brittle have relied on sliding window attention suggesting a potential link. Additionally, we contribute a novel method for constructing preference relations from arbitrary LLMs and support for a hypothesis regarding human behavior in the traveler{'}s dilemma."
}