@inproceedings{utsumi-2018-refining,
title = "Refining Pretrained Word Embeddings Using Layer-wise Relevance Propagation",
author = "Utsumi, Akira",
editor = "Riloff, Ellen and
Chiang, David and
Hockenmaier, Julia and
Tsujii, Jun{'}ichi",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
month = oct # "-" # nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/D18-1520/",
doi = "10.18653/v1/D18-1520",
pages = "4840--4846",
abstract = "In this paper, we propose a simple method for refining pretrained word embeddings using layer-wise relevance propagation. Given a target semantic representation one would like word vectors to reflect, our method first trains the mapping between the original word vectors and the target representation using a neural network. Estimated target values are then propagated backward toward word vectors, and a relevance score is computed for each dimension of word vectors. Finally, the relevance score vectors are used to refine the original word vectors so that they are projected into the subspace that reflects the information relevant to the target representation. The evaluation experiment using binary classification of word pairs demonstrates that the refined vectors by our method achieve the higher performance than the original vectors."
}
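
The abstract outlines a three-step pipeline: fit a neural mapping from pretrained word vectors to a target semantic representation, run layer-wise relevance propagation (LRP) backward to score each embedding dimension, then reweight the vectors by those scores. The sketch below is a minimal, illustrative reading of that pipeline, not the paper's implementation: the one-hidden-layer ReLU network, the epsilon-LRP rule, the synthetic data, and the element-wise reweighting in the final step are all assumptions made here for demonstration.

```python
# Illustrative sketch (assumptions noted above, not the paper's code):
# 1) train a small network mapping word vectors -> target representation,
# 2) propagate predicted target values back with epsilon-rule LRP,
# 3) reweight each embedding dimension by its relevance score.
import numpy as np

rng = np.random.default_rng(0)

# Toy data: 1000 words, 50-d pretrained embeddings, 10-d target representation
# (e.g. feature norms); both are random placeholders for this sketch.
n_words, d_emb, d_tgt, d_hid = 1000, 50, 10, 64
X = rng.normal(size=(n_words, d_emb))           # pretrained word vectors
T = rng.normal(size=(n_words, d_tgt))           # target semantic representation

W1 = rng.normal(scale=0.1, size=(d_emb, d_hid)); b1 = np.zeros(d_hid)
W2 = rng.normal(scale=0.1, size=(d_hid, d_tgt)); b2 = np.zeros(d_tgt)

def forward(x):
    z1 = x @ W1 + b1
    a1 = np.maximum(z1, 0.0)                    # ReLU hidden layer
    z2 = a1 @ W2 + b2                           # linear output layer
    return z1, a1, z2

# --- 1. Fit the mapping X -> T with plain gradient descent on squared error.
lr = 1e-2
for epoch in range(200):
    z1, a1, z2 = forward(X)
    err = z2 - T
    gW2 = a1.T @ err / n_words;  gb2 = err.mean(axis=0)
    dz1 = (err @ W2.T) * (z1 > 0)
    gW1 = X.T @ dz1 / n_words;   gb1 = dz1.mean(axis=0)
    W1 -= lr * gW1; b1 -= lr * gb1
    W2 -= lr * gW2; b2 -= lr * gb2

# --- 2. Epsilon-rule LRP: redistribute estimated target values backward to get
#        a relevance score for every embedding dimension (biases ignored).
def stabilize(d, eps=1e-6):
    return d + eps * np.where(d >= 0, 1.0, -1.0)

def lrp_relevance(x):
    z1, a1, z2 = forward(x)
    R_out = z2                                  # relevance starts at the output
    contrib = a1[:, :, None] * W2[None, :, :]   # (n, d_hid, d_tgt) contributions
    R_hid = (contrib / stabilize(contrib.sum(axis=1, keepdims=True))
             * R_out[:, None, :]).sum(axis=2)   # relevance of hidden units
    contrib = x[:, :, None] * W1[None, :, :]    # (n, d_emb, d_hid) contributions
    R_in = (contrib / stabilize(contrib.sum(axis=1, keepdims=True))
            * R_hid[:, None, :]).sum(axis=2)    # relevance per embedding dim
    return R_in

# --- 3. Refine: emphasize dimensions relevant to the target representation by
#        reweighting each one with its normalized relevance score.
R = np.abs(lrp_relevance(X))
R /= R.sum(axis=1, keepdims=True) + 1e-12
X_refined = X * R                               # element-wise reweighting
print("refined embedding matrix:", X_refined.shape)
```

The reweighting in step 3 is one plausible way to "project into the relevant subspace"; the paper should be consulted for the exact refinement operation and evaluation protocol.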