% CMCL 2025 workshop paper (ACL Anthology: 2025.cmcl-1.26).
% NOTE(review): original url pointed at the "fix-sig-urls" preview host;
% replaced with the canonical aclanthology.org URL. Field values moved
% from quotes to braces (braces nest; the abstract contains quote marks)
% and field names normalised to lowercase.
@inproceedings{keles-deniz-2025-men,
  title     = {When Men Bite Dogs: Testing Good-Enough Parsing in {T}urkish with Humans and Large Language Models},
  author    = {Kele{\c{s}}, Onur and
               Deniz, Nazik Dinctopal},
  editor    = {Kuribayashi, Tatsuki and
               Rambelli, Giulia and
               Takmaz, Ece and
               Wicke, Philipp and
               Li, Jixing and
               Oh, Byung-Doh},
  booktitle = {Proceedings of the Workshop on Cognitive Modeling and Computational Linguistics},
  month     = may,
  year      = {2025},
  address   = {Albuquerque, New Mexico, USA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.cmcl-1.26/},
  pages     = {219--231},
  isbn      = {979-8-89176-227-5},
  abstract  = {This paper investigates good-enough parsing in Turkish by comparing human self-paced reading performance to the surprisal and attention patterns of three Turkish Large Language Models (LLMs), GPT-2-Base, GPT-2-Large, and LLaMA-3. The results show that Turkish speakers rely on good-enough parsing for implausible but grammatically permissible sentences (e.g., interpreting sentences such as `the man bit the dog' as `the dog bit the man'). Although the smaller LLMs (e.g., GPT-2) were better predictors of human RTs, they seem to have relied more heavily on semantic plausibility than humans. Comparably, larger LLMs (e.g., LLaMA-3) tended to make more probabilistic parsing based on word order, exhibiting less \textit{good-enough} parsing behavior. Therefore, we conclude that LLMs take syntactic and semantic constraints into account when processing thematic roles, but not to the same extent as human parsers.},
}
Markdown (Informal)
[When Men Bite Dogs: Testing Good-Enough Parsing in Turkish with Humans and Large Language Models](https://aclanthology.org/2025.cmcl-1.26/) (Keleş & Deniz, CMCL 2025)
ACL