@inproceedings{chiang-etal-2020-understanding,
title = "Understanding the Source of Semantic Regularities in Word Embeddings",
author = "Chiang, Hsiao-Yu and
Camacho-Collados, Jose and
Pardos, Zachary",
editor = "Fern{\'a}ndez, Raquel and
Linzen, Tal",
booktitle = "Proceedings of the 24th Conference on Computational Natural Language Learning",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2020.conll-1.9/",
doi = "10.18653/v1/2020.conll-1.9",
pages = "119--131",
abstract = "Semantic relations are core to how humans understand and express concepts in the real world using language. Recently, there has been a thread of research aimed at modeling these relations by learning vector representations from text corpora. Most of these approaches focus strictly on leveraging the co-occurrences of relationship word pairs within sentences. In this paper, we investigate the hypothesis that examples of a lexical relation in a corpus are fundamental to a neural word embedding{'}s ability to complete analogies involving the relation. Our experiments, in which we remove all known examples of a relation from training corpora, show only marginal degradation in analogy completion performance involving the removed relation. This finding enhances our understanding of neural word embeddings, showing that co-occurrence information of a particular semantic relation is not the main source of their structural regularity."
}