@inproceedings{borah-etal-2025-mind,
    title = "Mind the (Belief) Gap: Group Identity in the World of {LLM}s",
    author = "Borah, Angana and
      Houalla, Marwa and
      Mihalcea, Rada",
    editor = "Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher",
    booktitle = "Findings of the Association for Computational Linguistics: {ACL} 2025",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-acl.948/",
    pages = "18441--18463",
    isbn = "979-8-89176-256-5",
    abstract = "Social biases and belief-driven behaviors can significantly impact Large Language Models' (LLMs') decisions on several tasks. As LLMs are increasingly used in multi-agent systems for societal simulations, their ability to model fundamental group psychological characteristics remains critical yet under-explored. In this study, we present a multi-agent framework that simulates belief congruence, a classical group psychology theory that plays a crucial role in shaping societal interactions and preferences. Our findings reveal that LLMs exhibit amplified belief congruence compared to humans, across diverse contexts. We further investigate the implications of this behavior on two downstream tasks: (1) misinformation dissemination and (2) LLM learning, finding that belief congruence in LLMs increases misinformation dissemination and impedes learning. To mitigate these negative impacts, we propose strategies inspired by: (1) contact hypothesis, (2) accuracy nudges, and (3) global citizenship framework. Our results show that the best strategies reduce misinformation dissemination by up to (37{\%}) and enhance learning by (11{\%}). Bridging social psychology and AI, our work provides insights to navigate real-world interactions using LLMs while addressing belief-driven biases.",
}
Markdown (Informal)
[Mind the (Belief) Gap: Group Identity in the World of LLMs](https://aclanthology.org/2025.findings-acl.948/) (Borah et al., Findings 2025)
ACL