@inproceedings{fernandez-downey-2018-sampling,
title = "Sampling Informative Training Data for {RNN} Language Models",
author = "Fernandez, Jared and
Downey, Doug",
editor = "Shwartz, Vered and
Tabassum, Jeniya and
Voigt, Rob and
Che, Wanxiang and
de Marneffe, Marie-Catherine and
Nissim, Malvina",
booktitle = "Proceedings of {ACL} 2018, Student Research Workshop",
month = jul,
year = "2018",
address = "Melbourne, Australia",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/icon-24-ingestion/P18-3002/",
doi = "10.18653/v1/P18-3002",
pages = "9--13",
abstract = "We propose an unsupervised importance sampling approach to selecting training data for recurrent neural network (RNNs) language models. To increase the information content of the training set, our approach preferentially samples high perplexity sentences, as determined by an easily queryable n-gram language model. We experimentally evaluate the heldout perplexity of models trained with our various importance sampling distributions. We show that language models trained on data sampled using our proposed approach outperform models trained over randomly sampled subsets of both the Billion Word (Chelba et al., 2014 Wikitext-103 benchmark corpora (Merity et al., 2016)."
}
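
The sampling idea described in the abstract can be illustrated with a minimal sketch. This is not the authors' implementation: the toy corpus, the bigram model with add-one smoothing, and perplexity-proportional selection via `random.choices` are all assumptions made for the example; the paper uses a full n-gram language model and large benchmark corpora.

```python
# Illustrative sketch (not the paper's code): importance sampling of training
# sentences in proportion to their perplexity under a simple n-gram (bigram)
# language model, so high-perplexity sentences are preferentially selected.
import math
import random
from collections import Counter

# Hypothetical toy corpus standing in for the candidate training pool.
corpus = [
    "the cat sat on the mat",
    "the dog sat on the log",
    "colorless green ideas sleep furiously",
    "the cat chased the dog",
    "a quantum of solace is not a unit of measure",
]

# Train a bigram model with add-one (Laplace) smoothing.
tokenized = [["<s>"] + s.split() + ["</s>"] for s in corpus]
unigrams = Counter(w for sent in tokenized for w in sent)
bigrams = Counter(pair for sent in tokenized for pair in zip(sent, sent[1:]))
vocab_size = len(unigrams)

def sentence_perplexity(sentence: str) -> float:
    """Perplexity of a sentence under the smoothed bigram model."""
    tokens = ["<s>"] + sentence.split() + ["</s>"]
    log_prob = 0.0
    for prev, cur in zip(tokens, tokens[1:]):
        prob = (bigrams[(prev, cur)] + 1) / (unigrams[prev] + vocab_size)
        log_prob += math.log(prob)
    return math.exp(-log_prob / (len(tokens) - 1))

# Importance sampling: weight each candidate sentence by its perplexity,
# then draw a training subset with probability proportional to that weight.
weights = [sentence_perplexity(s) for s in corpus]
sample = random.choices(corpus, weights=weights, k=3)
print(sample)
```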