@inproceedings{cong-etal-2023-language,
  title     = {Are Language Models Sensitive to Semantic Attraction? {A} Study on Surprisal},
  author    = {Cong, Yan and
               Chersoni, Emmanuele and
               Hsu, Yu-yin and
               Lenci, Alessandro},
  editor    = {Palmer, Alexis and
               Camacho-Collados, Jose},
  booktitle = {Proceedings of the 12th Joint Conference on Lexical and Computational Semantics ({*SEM} 2023)},
  month     = jul,
  year      = {2023},
  address   = {Toronto, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.starsem-1.13/},
  doi       = {10.18653/v1/2023.starsem-1.13},
  pages     = {141--148},
  abstract  = {In psycholinguistics, semantic attraction is a sentence processing phenomenon in which a given argument violates the selectional requirements of a verb, but this violation is not perceived by comprehenders due to its attraction to another noun in the same sentence, which is syntactically unrelated but semantically sound. In our study, we use autoregressive language models to compute the sentence-level and the target phrase-level Surprisal scores of a psycholinguistic dataset on semantic attraction. Our results show that the models are sensitive to semantic attraction, leading to reduced Surprisal scores, although none of them perfectly matches the human behavioral pattern.},
}
Markdown (Informal)
[Are Language Models Sensitive to Semantic Attraction? A Study on Surprisal](https://aclanthology.org/2023.starsem-1.13/) (Cong et al., *SEM 2023)
ACL