@inproceedings{baltaji-etal-2024-conformity,
title = "Conformity, Confabulation, and Impersonation: Persona Inconstancy in Multi-Agent {LLM} Collaboration",
author = "Baltaji, Razan and
Hemmatian, Babak and
Varshney, Lav",
editor = "Prabhakaran, Vinodkumar and
Dev, Sunipa and
Benotti, Luciana and
Hershcovich, Daniel and
Cabello, Laura and
Cao, Yong and
Adebara, Ife and
Zhou, Li",
booktitle = "Proceedings of the 2nd Workshop on Cross-Cultural Considerations in NLP",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.c3nlp-1.2",
doi = "10.18653/v1/2024.c3nlp-1.2",
pages = "17--31",
abstract = "This study explores the sources of instability in maintaining cultural personas and opinions within multi-agent LLM systems. Drawing on simulations of inter-cultural collaboration and debate, we analyze agents{'} pre- and post-discussion private responses alongside chat transcripts to assess the stability of cultural personas and the impact of opinion diversity on group outcomes. Our findings suggest that multi-agent discussions can encourage collective decisions that reflect diverse perspectives, yet this benefit is tempered by the agents{'} susceptibility to conformity due to perceived peer pressure and challenges in maintaining consistent personas and opinions. Counterintuitively, instructions that encourage debate in support of one{'}s opinions increase the rate of instability. Without addressing the factors we identify, the full potential of multi-agent frameworks for producing more culturally diverse AI outputs will remain untapped.",
}