@inproceedings{gulordava-etal-2018-represent,
title = "How to represent a word and predict it, too: Improving tied architectures for language modelling",
author = "Gulordava, Kristina and
Aina, Laura and
Boleda, Gemma",
editor = "Riloff, Ellen and
Chiang, David and
Hockenmaier, Julia and
Tsujii, Jun{'}ichi",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
month = oct # "-" # nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/D18-1323/",
doi = "10.18653/v1/D18-1323",
pages = "2936--2941",
abstract = "Recent state-of-the-art neural language models share the representations of words given by the input and output mappings. We propose a simple modification to these architectures that decouples the hidden state from the word embedding prediction. Our architecture leads to comparable or better results compared to previous tied models and models without tying, with a much smaller number of parameters. We also extend our proposal to word2vec models, showing that tying is appropriate for general word prediction tasks."
}
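For orientation, the abstract describes weight tying (sharing the input embedding matrix with the output softmax) plus an extra transformation that decouples the hidden state from the prediction embedding. Below is a minimal PyTorch sketch of that general idea; the `TiedLM` class, the `decouple` projection, and all dimensions are illustrative assumptions, not the authors' released code or exact architecture.

```python
import torch
import torch.nn as nn

class TiedLM(nn.Module):
    """Minimal LSTM language model with tied input/output embeddings.

    The linear map between the LSTM hidden state and the (tied) output
    embedding space loosely illustrates decoupling the hidden state from
    the word-embedding prediction; it is an assumption for illustration,
    not the paper's exact model.
    """

    def __init__(self, vocab_size: int, emb_dim: int = 300, hid_dim: int = 300):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, emb_dim)
        self.lstm = nn.LSTM(emb_dim, hid_dim, batch_first=True)
        # Decoupling transform: hidden state -> prediction-embedding space.
        self.decouple = nn.Linear(hid_dim, emb_dim, bias=False)
        # Output projection shares its weight matrix with the input embedding,
        # so no separate output embedding parameters are learned.
        self.out = nn.Linear(emb_dim, vocab_size, bias=False)
        self.out.weight = self.embed.weight  # weight tying

    def forward(self, tokens: torch.Tensor) -> torch.Tensor:
        h, _ = self.lstm(self.embed(tokens))
        return self.out(self.decouple(h))  # next-token logits

# Usage: next-token logits for a toy batch of token ids.
model = TiedLM(vocab_size=10_000)
logits = model(torch.randint(0, 10_000, (2, 8)))  # shape (2, 8, 10_000)
```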