@inproceedings{schmidt-2019-generalization,
    title = "Generalization in Generation: A closer look at Exposure Bias",
    author = "Schmidt, Florian",
    booktitle = "Proceedings of the 3rd Workshop on Neural Generation and Translation",
    month = nov,
    year = "2019",
    address = "Hong Kong",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D19-5616",
    doi = "10.18653/v1/D19-5616",
    pages = "157--167",
    abstract = "Exposure bias refers to the train-test discrepancy that seemingly arises when an autoregressive generative model uses only ground-truth contexts at training time but generated ones at test time. We separate the contribution of the learning framework and the model to clarify the debate on consequences and review proposed counter-measures. In this light, we argue that generalization is the underlying property to address and propose unconditional generation as its fundamental benchmark. Finally, we combine latent variable modeling with a recent formulation of exploration in reinforcement learning to obtain a rigorous handling of true and generated contexts. Results on language modeling and variational sentence auto-encoding confirm the model{'}s generalization capability.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="schmidt-2019-generalization">
    <titleInfo>
      <title>Generalization in Generation: A closer look at Exposure Bias</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Florian</namePart>
      <namePart type="family">Schmidt</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 3rd Workshop on Neural Generation and Translation</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Hong Kong</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Exposure bias refers to the train-test discrepancy that seemingly arises when an autoregressive generative model uses only ground-truth contexts at training time but generated ones at test time. We separate the contribution of the learning framework and the model to clarify the debate on consequences and review proposed counter-measures. In this light, we argue that generalization is the underlying property to address and propose unconditional generation as its fundamental benchmark. Finally, we combine latent variable modeling with a recent formulation of exploration in reinforcement learning to obtain a rigorous handling of true and generated contexts. Results on language modeling and variational sentence auto-encoding confirm the model’s generalization capability.</abstract>
    <identifier type="citekey">schmidt-2019-generalization</identifier>
    <identifier type="doi">10.18653/v1/D19-5616</identifier>
    <location>
      <url>https://aclanthology.org/D19-5616</url>
    </location>
    <part>
      <date>2019-11</date>
      <extent unit="page">
        <start>157</start>
        <end>167</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Generalization in Generation: A closer look at Exposure Bias
%A Schmidt, Florian
%S Proceedings of the 3rd Workshop on Neural Generation and Translation
%D 2019
%8 nov
%I Association for Computational Linguistics
%C Hong Kong
%F schmidt-2019-generalization
%X Exposure bias refers to the train-test discrepancy that seemingly arises when an autoregressive generative model uses only ground-truth contexts at training time but generated ones at test time. We separate the contribution of the learning framework and the model to clarify the debate on consequences and review proposed counter-measures. In this light, we argue that generalization is the underlying property to address and propose unconditional generation as its fundamental benchmark. Finally, we combine latent variable modeling with a recent formulation of exploration in reinforcement learning to obtain a rigorous handling of true and generated contexts. Results on language modeling and variational sentence auto-encoding confirm the model’s generalization capability.
%R 10.18653/v1/D19-5616
%U https://aclanthology.org/D19-5616
%U https://doi.org/10.18653/v1/D19-5616
%P 157-167
Markdown (Informal)
[Generalization in Generation: A closer look at Exposure Bias](https://aclanthology.org/D19-5616) (Schmidt, EMNLP 2019)
ACL
Florian Schmidt. 2019. Generalization in Generation: A closer look at Exposure Bias. In Proceedings of the 3rd Workshop on Neural Generation and Translation, pages 157–167, Hong Kong. Association for Computational Linguistics.
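
As a companion to the abstract above, here is a minimal sketch of the train-test discrepancy it describes: during training an autoregressive model conditions on ground-truth prefixes (teacher forcing), while at test time it conditions on its own sampled prefixes. This is purely illustrative and not code from the paper; the TinyLM model and all sizes are hypothetical.

```python
# Illustrative sketch of the context mismatch behind "exposure bias".
# Not from the paper; TinyLM and all hyperparameters are hypothetical.
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyLM(nn.Module):
    def __init__(self, vocab_size=20, hidden=32):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, hidden)
        self.rnn = nn.GRU(hidden, hidden, batch_first=True)
        self.out = nn.Linear(hidden, vocab_size)

    def forward(self, tokens, state=None):
        h, state = self.rnn(self.embed(tokens), state)
        return self.out(h), state

model = TinyLM()
seq = torch.randint(0, 20, (1, 10))  # toy "ground-truth" sequence

# Training: every step is conditioned on the ground-truth prefix
# seq[:, :t] (teacher forcing), never on the model's own output.
logits, _ = model(seq[:, :-1])
loss = F.cross_entropy(logits.reshape(-1, 20), seq[:, 1:].reshape(-1))

# Test-time generation: every step is conditioned on the model's *own*
# previous samples, a context distribution never seen during training.
tok, state, generated = seq[:, :1], None, []
for _ in range(9):
    logits, state = model(tok, state)
    tok = torch.multinomial(F.softmax(logits[:, -1], dim=-1), 1)
    generated.append(tok.item())
```

The counter-measures the paper reviews, and its combination of latent variable modeling with exploration from reinforcement learning, target exactly this mismatch between the two context distributions.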