@inproceedings{braun-2025-acquiescence,
title = "Acquiescence Bias in Large Language Models",
author = "Braun, Daniel",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/name-variant-enfa-fane/2025.findings-emnlp.607/",
doi = "10.18653/v1/2025.findings-emnlp.607",
pages = "11341--11355",
ISBN = "979-8-89176-335-7",
abstract = "Acquiescence bias, i.e. the tendency of humans to agree with statements in surveys, independent of their actual beliefs, is well researched and documented. Since Large Language Models (LLMs) have been shown to be very influenceable by relatively small changes in input and are trained on human-generated data, it is reasonable to assume that they could show a similar tendency. We present a study investigating the presence of acquiescence bias in LLMs across different models, tasks, and languages (English, German, and Polish). Our results indicate that, contrary to humans, LLMs display a bias towards answering no, regardless of whether it indicates agreement or disagreement."
}

Markdown (Informal)
[Acquiescence Bias in Large Language Models](https://aclanthology.org/2025.findings-emnlp.607/) (Braun, Findings 2025)
ACL
Daniel Braun. 2025. Acquiescence Bias in Large Language Models. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 11341–11355, Suzhou, China. Association for Computational Linguistics.
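
The abstract implies a simple probing protocol: pose yes/no statements so that "yes" sometimes signals agreement and sometimes disagreement, then check whether answers track belief or phrasing. Below is a minimal Python sketch of that idea, not the paper's released code or exact instrument: `query_model` is a hypothetical stand-in for a real chat-completion API (here stubbed to always answer "no", mimicking the no-bias the paper reports), and the statement pairs are illustrative.

```python
# Sketch: probe a model for acquiescence bias by pairing each yes/no
# statement with its negation. A consistent respondent can answer "yes"
# to at most one item per pair, so double-"yes" responses indicate
# acquiescence and double-"no" responses indicate a bias towards "no".

def query_model(prompt: str) -> str:
    """Hypothetical model call. This stub always answers "no" to mimic
    the tendency the paper reports; swap in a real API client to test."""
    return "No"

# Illustrative statement/negation pairs (not from the paper's survey).
STATEMENT_PAIRS = [
    ("Remote work increases productivity. Answer yes or no.",
     "Remote work does not increase productivity. Answer yes or no."),
    ("Cities are safer than rural areas. Answer yes or no.",
     "Cities are not safer than rural areas. Answer yes or no."),
]

def response_bias(pairs):
    """Return (double-yes rate, double-no rate) over statement pairs."""
    double_yes = double_no = 0
    for statement, negation in pairs:
        a = query_model(statement).strip().lower().startswith("yes")
        b = query_model(negation).strip().lower().startswith("yes")
        double_yes += a and b            # agreed with both phrasings
        double_no += (not a) and (not b)  # disagreed with both phrasings
    n = len(pairs)
    return double_yes / n, double_no / n

if __name__ == "__main__":
    yes_rate, no_rate = response_bias(STATEMENT_PAIRS)
    print(f"double-yes (acquiescence) rate: {yes_rate:.2f}")
    print(f"double-no rate: {no_rate:.2f}")
```

With the stub as written, every pair yields two "no" answers, so the script prints a double-no rate of 1.00, the pattern the abstract describes for LLMs; a human respondent prone to acquiescence would instead push the double-yes rate up.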