@inproceedings{wang-etal-2025-predict,
title = "What to Predict? Exploring How Sentence Structure Influences Contrast Predictions in Humans and Large Language Models",
author = "Wang, Shuqi and
Duan, Xufeng and
Cai, Zhenguang",
editor = "Kuribayashi, Tatsuki and
Rambelli, Giulia and
Takmaz, Ece and
Wicke, Philipp and
Li, Jixing and
Oh, Byung-Doh",
booktitle = "Proceedings of the Workshop on Cognitive Modeling and Computational Linguistics",
month = may,
year = "2025",
address = "Albuquerque, New Mexico, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.cmcl-1.28/",
pages = "244--252",
ISBN = "979-8-89176-227-5",
abstract = "This study examines how sentence structure shapes contrast predictions in both humans and large language models (LLMs). Using Mandarin ditransitive constructions {---} double object (DO, ``She gave the girl the candy, but not...'') vs. prepositional object (PO, ``She gave the candy to the girl, but not...'') as a testbed, we employed a sentence continuation task involving three human groups (written, spoken, and prosodically normalized spoken stimuli) and three LLMs (GPT-4o, LLaMA-3, and Qwen-2.5). Two principal findings emerged: (1) Although human participants predominantly focused on the theme (e.g., ``the candy''), contrast predictions were significantly modulated by sentence structure{---}particularly in spoken contexts, where the sentence-final element drew more attention. (2) While LLMs showed a similar reliance on structure, they displayed a larger effect size and more closely resembled human spoken data than written data, indicating a stronger emphasis on linear order in generating contrast predictions. By adopting a unified psycholinguistic paradigm, this study advances our understanding of predictive language processing for both humans and LLMs and informs research on human{--}model alignment in linguistic tasks."
}
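The continuation paradigm described in the abstract can be approximated programmatically. Below is a minimal, hypothetical sketch of how one might elicit contrast continuations from GPT-4o through the OpenAI chat API; the English stand-in frames, prompt wording, and sampling settings are illustrative assumptions, not the authors' actual Mandarin stimuli or protocol.

```python
# Hypothetical sketch of a sentence-continuation probe for contrast
# predictions in DO vs. PO frames. Prompts and settings are illustrative
# assumptions; they are not the protocol used by Wang et al. (2025).
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# English analogues of the Mandarin ditransitive frames from the abstract.
FRAMES = {
    "DO": "She gave the girl the candy, but not",
    "PO": "She gave the candy to the girl, but not",
}

def continue_sentence(fragment: str) -> str:
    """Ask the model to complete the contrast ('but not ...') fragment."""
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system",
             "content": "Complete the sentence naturally with a short phrase."},
            {"role": "user", "content": fragment},
        ],
        temperature=1.0,  # sample, so repeated runs estimate a distribution
        max_tokens=10,
    )
    return response.choices[0].message.content.strip()

for frame, fragment in FRAMES.items():
    # Repeated sampling would let one tally how often the contrast targets
    # the theme ("the candy") versus the recipient ("the girl").
    print(frame, "->", fragment, continue_sentence(fragment))
```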