@inproceedings{choi-etal-2025-people,
title = "People will agree what {I} think: Investigating {LLM}`s False Consensus Effect",
author = "Choi, Junhyuk and
Hong, Yeseon and
Kim, Bugeun",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Ingest-2025-COMPUTEL/2025.findings-naacl.6/",
pages = "95--126",
ISBN = "979-8-89176-195-7",
abstract = "Large Language Models (LLMs) have been recently adopted in interactive systems requiring communication. As the false belief in a model can harm the usability of such systems, LLMs should not have cognitive biases that humans have. Psychologists especially focus on the False Consensus Effect (FCE), a cognitive bias where individuals overestimate the extent to which others share their beliefs or behaviors, because FCE can distract smooth communication by posing false beliefs. However, previous studies have less examined FCE in LLMs thoroughly, which needs more consideration of confounding biases, general situations, and prompt changes. Therefore, in this paper, we conduct two studies to examine the FCE phenomenon in LLMs. In Study 1, we investigate whether LLMs have FCE. In Study 2, we explore how various prompting styles affect the demonstration of FCE. As a result of these studies, we identified that popular LLMs have FCE. Also, the result specifies the conditions when FCE becomes more or less prevalent compared to normal usage."
}
Markdown (Informal)
[People will agree what I think: Investigating LLM’s False Consensus Effect](https://preview.aclanthology.org/Ingest-2025-COMPUTEL/2025.findings-naacl.6/) (Choi et al., Findings 2025)