@inproceedings{kovaleva-etal-2018-similarity,
title = "Similarity-Based Reconstruction Loss for Meaning Representation",
author = "Kovaleva, Olga and
Rumshisky, Anna and
Romanov, Alexey",
editor = "Riloff, Ellen and
Chiang, David and
Hockenmaier, Julia and
Tsujii, Jun{'}ichi",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
month = oct # "-" # nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/D18-1525/",
doi = "10.18653/v1/D18-1525",
pages = "4875--4880",
abstract = "This paper addresses the problem of representation learning. Using an autoencoder framework, we propose and evaluate several loss functions that can be used as an alternative to the commonly used cross-entropy reconstruction loss. The proposed loss functions use similarities between words in the embedding space, and can be used to train any neural model for text generation. We show that the introduced loss functions amplify semantic diversity of reconstructed sentences, while preserving the original meaning of the input. We test the derived autoencoder-generated representations on paraphrase detection and language inference tasks and demonstrate performance improvement compared to the traditional cross-entropy loss."
}
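
The abstract describes reconstruction losses that replace one-hot cross-entropy targets with targets derived from word similarities in the embedding space. Below is a minimal PyTorch sketch of one such soft-label variant, not the paper's exact formulation: the function name, the temperature hyperparameter, and the softmax-over-cosine weighting are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def similarity_reconstruction_loss(logits, targets, embedding_weight, temperature=0.1):
    """
    Sketch of a similarity-based reconstruction loss.

    logits:            (batch, seq_len, vocab) decoder output scores
    targets:           (batch, seq_len) gold token ids
    embedding_weight:  (vocab, dim) word embedding matrix
    temperature:       sharpness of the soft target distribution (assumed hyperparameter)
    """
    # Cosine similarity between each gold token's embedding and every vocabulary embedding.
    emb = F.normalize(embedding_weight, dim=-1)   # (vocab, dim)
    gold = emb[targets]                           # (batch, seq_len, dim)
    sims = gold @ emb.t()                         # (batch, seq_len, vocab)

    # Soft targets: words similar to the gold word share probability mass with it,
    # instead of the usual one-hot target used by cross-entropy.
    soft_targets = F.softmax(sims / temperature, dim=-1)

    # Cross-entropy between the soft targets and the model's predicted distribution.
    log_probs = F.log_softmax(logits, dim=-1)
    return -(soft_targets * log_probs).sum(dim=-1).mean()

# Example usage with random tensors (shapes only; a real decoder would supply logits):
# logits = torch.randn(8, 20, 10000)
# targets = torch.randint(0, 10000, (8, 20))
# emb = torch.randn(10000, 300)
# loss = similarity_reconstruction_loss(logits, targets, emb)
```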