@inproceedings{ren-etal-2024-valuebench,
  title     = "{ValueBench}: Towards Comprehensively Evaluating Value Orientations and Understanding of Large Language Models",
  author    = "Ren, Yuanyi and
               Ye, Haoran and
               Fang, Hanjun and
               Zhang, Xin and
               Song, Guojie",
  editor    = "Ku, Lun-Wei and
               Martins, Andre and
               Srikumar, Vivek",
  booktitle = "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
  month     = aug,
  year      = "2024",
  address   = "Bangkok, Thailand",
  publisher = "Association for Computational Linguistics",
  url       = "https://aclanthology.org/2024.acl-long.111/",
  doi       = "10.18653/v1/2024.acl-long.111",
  pages     = "2015--2040",
  abstract  = "Large Language Models (LLMs) are transforming diverse fields and gaining increasing influence as human proxies. This development underscores the urgent need for evaluating value orientations and understanding of LLMs to ensure their responsible integration into public-facing applications. This work introduces ValueBench, the first comprehensive psychometric benchmark for evaluating value orientations and understanding in LLMs. ValueBench collects data from 44 established psychometric inventories, encompassing 453 multifaceted value dimensions. We propose an evaluation pipeline grounded in realistic human-AI interactions to probe value orientations, along with novel tasks for evaluating value understanding in an open-ended value space. With extensive experiments conducted on six representative LLMs, we unveil their shared and distinctive value orientations and exhibit their ability to approximate expert conclusions in value-related extraction and generation tasks."
}
Markdown (Informal)
[ValueBench: Towards Comprehensively Evaluating Value Orientations and Understanding of Large Language Models](https://aclanthology.org/2024.acl-long.111/) (Ren et al., ACL 2024)
ACL