@inproceedings{tanguy-etal-2025-human,
title = "Human Alignment: How Much Do We Adapt to {LLM}s?",
author = "Tanguy, Cazalets and
Janssens, Ruben and
Belpaeme, Tony and
Dambre, Joni",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-short.47/",
pages = "603--613",
ISBN = "979-8-89176-252-7",
abstract = "Large Language Models (LLMs) are becoming a common part of our lives, yet few studies have examined how they influence our behavior. Using a cooperative language game in which players aim to converge on a shared word, we investigate how people adapt their communication strategies when paired with either an LLM or another human. Our study demonstrates that LLMs exert a measurable influence on human communication strategies and that humans notice and adapt to these differences irrespective of whether they are aware they are interacting with an LLM. These findings highlight the reciprocal influence of human{--}AI dialogue and raise important questions about the long-term implications of embedding LLMs in everyday communication."
}
Markdown (Informal)
[Human Alignment: How Much Do We Adapt to LLMs?](https://aclanthology.org/2025.acl-short.47/) (Cazalets et al., ACL 2025)
ACL
- Tanguy Cazalets, Ruben Janssens, Tony Belpaeme, and Joni Dambre. 2025. Human Alignment: How Much Do We Adapt to LLMs? In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 603–613, Vienna, Austria. Association for Computational Linguistics.