@inproceedings{lam-etal-2025-leveraging,
  title     = {Leveraging Human Production-Interpretation Asymmetries to Test {LLM} Cognitive Plausibility},
  author    = {Lam, Suet-Ying and
               Zeng, Qingcheng and
               Wu, Jingyi and
               Voigt, Rob},
  editor    = {Che, Wanxiang and
               Nabende, Joyce and
               Shutova, Ekaterina and
               Pilehvar, Mohammad Taher},
  booktitle = {Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.acl-short.14/},
  pages     = {158--171},
  isbn      = {979-8-89176-252-7},
  abstract  = {Whether large language models (LLMs) process language similarly to humans has been the subject of much theoretical and practical debate. We examine this question through the lens of the production-interpretation distinction found in human sentence processing and evaluate the extent to which instruction-tuned LLMs replicate this distinction. Using an empirically documented asymmetry between pronoun production and interpretation in humans for implicit causality verbs as a testbed, we find that some LLMs do quantitatively and qualitatively reflect human-like asymmetries between production and interpretation. We demonstrate that whether this behavior holds depends upon both model size---with larger models more likely to reflect human-like patterns---and the choice of meta-linguistic prompts used to elicit the behavior. Our codes and results are available here.},
}
@comment{
Markdown (Informal):
[Leveraging Human Production-Interpretation Asymmetries to Test LLM Cognitive Plausibility](https://aclanthology.org/2025.acl-short.14/) (Lam et al., ACL 2025)
}