@inproceedings{jacobs-mccarthy-2020-human,
    title = "The human unlikeness of neural language models in next-word prediction",
    author = "Jacobs, Cassandra L.  and
      McCarthy, Arya D.",
    booktitle = "Proceedings of the Fourth Widening Natural Language Processing Workshop",
    month = jul,
    year = "2020",
    address = "Seattle, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.winlp-1.29",
    doi = "10.18653/v1/2020.winlp-1.29",
    pages = "115",
    abstract = "The training objective of unidirectional language models (LMs) is similar to a psycholinguistic benchmark known as the cloze task, which measures next-word predictability. However, LMs lack the rich set of experiences that people do, and humans can be highly creative. To assess human parity in these models{'} training objective, we compare the predictions of three neural language models to those of human participants in a freely available behavioral dataset (Luke {\&} Christianson, 2016). Our results show that while neural models show a close correspondence to human productions, they nevertheless assign insufficient probability to how often speakers guess upcoming words, especially for open-class content words.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="jacobs-mccarthy-2020-human">
<titleInfo>
<title>The human unlikeness of neural language models in next-word prediction</title>
</titleInfo>
<name type="personal">
<namePart type="given">Cassandra</namePart>
<namePart type="given">L</namePart>
<namePart type="family">Jacobs</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Arya</namePart>
<namePart type="given">D</namePart>
<namePart type="family">McCarthy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-jul</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the The Fourth Widening Natural Language Processing Workshop</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Seattle, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The training objective of unidirectional language models (LMs) is similar to a psycholinguistic benchmark known as the cloze task, which measures next-word predictability. However, LMs lack the rich set of experiences that people do, and humans can be highly creative. To assess human parity in these models’ training objective, we compare the predictions of three neural language models to those of human participants in a freely available behavioral dataset (Luke & Christianson, 2016). Our results show that while neural models show a close correspondence to human productions, they nevertheless assign insufficient probability to how often speakers guess upcoming words, especially for open-class content words.</abstract>
<identifier type="citekey">jacobs-mccarthy-2020-human</identifier>
<identifier type="doi">10.18653/v1/2020.winlp-1.29</identifier>
<location>
<url>https://aclanthology.org/2020.winlp-1.29</url>
</location>
<part>
<date>2020-jul</date>
<detail type="page"><number>115</number></detail>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T The human unlikeness of neural language models in next-word prediction
%A Jacobs, Cassandra L.
%A McCarthy, Arya D.
%S Proceedings of the Fourth Widening Natural Language Processing Workshop
%D 2020
%8 jul
%I Association for Computational Linguistics
%C Seattle, USA
%F jacobs-mccarthy-2020-human
%X The training objective of unidirectional language models (LMs) is similar to a psycholinguistic benchmark known as the cloze task, which measures next-word predictability. However, LMs lack the rich set of experiences that people do, and humans can be highly creative. To assess human parity in these models’ training objective, we compare the predictions of three neural language models to those of human participants in a freely available behavioral dataset (Luke & Christianson, 2016). Our results show that while neural models show a close correspondence to human productions, they nevertheless assign insufficient probability to how often speakers guess upcoming words, especially for open-class content words.
%R 10.18653/v1/2020.winlp-1.29
%U https://aclanthology.org/2020.winlp-1.29
%U https://doi.org/10.18653/v1/2020.winlp-1.29
%P 115
Markdown (Informal)
[The human unlikeness of neural language models in next-word prediction](https://aclanthology.org/2020.winlp-1.29) (Jacobs & McCarthy, WiNLP 2020)
ACL
Cassandra L. Jacobs and Arya D. McCarthy. 2020. The human unlikeness of neural language models in next-word prediction. In Proceedings of the Fourth Widening Natural Language Processing Workshop, page 115, Seattle, USA. Association for Computational Linguistics.