@inproceedings{cao-schuler-2025-larger,
    title     = "Are Larger Language Models Better at Disambiguation?",
    author    = "Cao, Ziyuan and
      Schuler, William",
    editor    = "Kuribayashi, Tatsuki and
      Rambelli, Giulia and
      Takmaz, Ece and
      Wicke, Philipp and
      Li, Jixing and
      Oh, Byung-Doh",
    booktitle = "Proceedings of the Workshop on Cognitive Modeling and Computational Linguistics",
    month     = may,
    year      = "2025",
    address   = "Albuquerque, New Mexico, USA",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2025.cmcl-1.20/",
    pages     = "155--164",
    isbn      = "979-8-89176-227-5",
    abstract  = "Humans deal with temporary syntactic ambiguity all the time in incremental sentence processing. Sentences with temporary ambiguity that causes processing difficulties, often reflected by increase in reading time, are referred to as garden-path sentences. Garden-path theories of sentence processing attribute the increases in reading time to the reanalysis of the previously ambiguous syntactic structure to make it consistent with the new disambiguating text. It is unknown whether transformer-based language models successfully resolve the temporary ambiguity after encountering the disambiguating text. We investigated this question by analyzing completions generated from language models for a type of garden-path sentence with ambiguity between a complement clause interpretation and a relative clause interpretation. We found that larger language models are worse at resolving such ambiguity."
}
Markdown (Informal)
[Are Larger Language Models Better at Disambiguation?](https://aclanthology.org/2025.cmcl-1.20/) (Cao & Schuler, CMCL 2025)
ACL
- Ziyuan Cao and William Schuler. 2025. Are Larger Language Models Better at Disambiguation?. In Proceedings of the Workshop on Cognitive Modeling and Computational Linguistics, pages 155–164, Albuquerque, New Mexico, USA. Association for Computational Linguistics.