@inproceedings{eisape-etal-2020-cloze,
title = "Cloze Distillation: Improving Neural Language Models with Human Next-Word Prediction",
author = "Eisape, Tiwalayo and
Zaslavsky, Noga and
Levy, Roger",
booktitle = "Proceedings of the 24th Conference on Computational Natural Language Learning",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.conll-1.49",
doi = "10.18653/v1/2020.conll-1.49",
pages = "609--619",
abstract = "Contemporary autoregressive language models (LMs) trained purely on corpus data have been shown to capture numerous features of human incremental processing. However, past work has also suggested dissociations between corpus probabilities and human next-word predictions. Here we evaluate several state-of-the-art language models for their match to human next-word predictions and to reading time behavior from eye movements. We then propose a novel method for distilling the linguistic information implicit in human linguistic predictions into pre-trained LMs: Cloze Distillation. We apply this method to a baseline neural LM and show potential improvement in reading time prediction and generalization to held-out human cloze data.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="eisape-etal-2020-cloze">
    <titleInfo>
      <title>Cloze Distillation: Improving Neural Language Models with Human Next-Word Prediction</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Tiwalayo</namePart>
      <namePart type="family">Eisape</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Noga</namePart>
      <namePart type="family">Zaslavsky</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Roger</namePart>
      <namePart type="family">Levy</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-nov</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 24th Conference on Computational Natural Language Learning</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Contemporary autoregressive language models (LMs) trained purely on corpus data have been shown to capture numerous features of human incremental processing. However, past work has also suggested dissociations between corpus probabilities and human next-word predictions. Here we evaluate several state-of-the-art language models for their match to human next-word predictions and to reading time behavior from eye movements. We then propose a novel method for distilling the linguistic information implicit in human linguistic predictions into pre-trained LMs: Cloze Distillation. We apply this method to a baseline neural LM and show potential improvement in reading time prediction and generalization to held-out human cloze data.</abstract>
    <identifier type="citekey">eisape-etal-2020-cloze</identifier>
    <identifier type="doi">10.18653/v1/2020.conll-1.49</identifier>
    <location>
      <url>https://aclanthology.org/2020.conll-1.49</url>
    </location>
    <part>
      <date>2020-nov</date>
      <extent unit="page">
        <start>609</start>
        <end>619</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Cloze Distillation: Improving Neural Language Models with Human Next-Word Prediction
%A Eisape, Tiwalayo
%A Zaslavsky, Noga
%A Levy, Roger
%S Proceedings of the 24th Conference on Computational Natural Language Learning
%D 2020
%8 nov
%I Association for Computational Linguistics
%C Online
%F eisape-etal-2020-cloze
%X Contemporary autoregressive language models (LMs) trained purely on corpus data have been shown to capture numerous features of human incremental processing. However, past work has also suggested dissociations between corpus probabilities and human next-word predictions. Here we evaluate several state-of-the-art language models for their match to human next-word predictions and to reading time behavior from eye movements. We then propose a novel method for distilling the linguistic information implicit in human linguistic predictions into pre-trained LMs: Cloze Distillation. We apply this method to a baseline neural LM and show potential improvement in reading time prediction and generalization to held-out human cloze data.
%R 10.18653/v1/2020.conll-1.49
%U https://aclanthology.org/2020.conll-1.49
%U https://doi.org/10.18653/v1/2020.conll-1.49
%P 609-619
Markdown (Informal)
[Cloze Distillation: Improving Neural Language Models with Human Next-Word Prediction](https://aclanthology.org/2020.conll-1.49) (Eisape et al., CoNLL 2020)
ACL
Tiwalayo Eisape, Noga Zaslavsky, and Roger Levy. 2020. [Cloze Distillation: Improving Neural Language Models with Human Next-Word Prediction](https://aclanthology.org/2020.conll-1.49). In *Proceedings of the 24th Conference on Computational Natural Language Learning*, pages 609–619, Online. Association for Computational Linguistics.
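
The abstract describes the Cloze Distillation idea at a high level: fine-tune a pre-trained LM so that its next-word distribution is pulled toward human cloze (next-word guessing) responses while retaining the standard corpus objective. The snippet below is a minimal sketch of such a mixed objective, assuming a PyTorch LM with per-context human cloze counts; the function name, tensor shapes, count normalization, and the interpolation weight `alpha` are illustrative assumptions, not the paper's implementation.

```python
import torch
import torch.nn.functional as F

def cloze_distillation_loss(lm_logits, corpus_targets, cloze_counts, alpha=0.5):
    """Hypothetical mixed objective: standard corpus cross-entropy plus
    cross-entropy against the empirical human cloze distribution.

    lm_logits:      (batch, vocab) next-word logits from the LM
    corpus_targets: (batch,) gold next-word ids from the corpus
    cloze_counts:   (batch, vocab) counts of human cloze completions
    alpha:          interpolation weight (illustrative hyperparameter)
    """
    # Standard next-word prediction loss on corpus data
    lm_loss = F.cross_entropy(lm_logits, corpus_targets)

    # Normalize human cloze counts into a probability distribution per context
    cloze_dist = cloze_counts.float()
    cloze_dist = cloze_dist / cloze_dist.sum(dim=-1, keepdim=True)

    # Cross-entropy of the LM's predictive distribution against the cloze distribution
    log_probs = F.log_softmax(lm_logits, dim=-1)
    cloze_loss = -(cloze_dist * log_probs).sum(dim=-1).mean()

    return alpha * lm_loss + (1.0 - alpha) * cloze_loss

# Toy usage: a vocabulary of 5 word types and a batch of 2 contexts
logits = torch.randn(2, 5)
targets = torch.tensor([1, 3])
counts = torch.tensor([[0, 7, 1, 0, 2], [3, 0, 0, 5, 2]])
loss = cloze_distillation_loss(logits, targets, counts)
```

For the exact objective, data, and evaluation setup (reading-time prediction and held-out cloze generalization), see the paper at the URL above.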