@inproceedings{herbelot-baroni-2017-high,
title = "High-risk learning: acquiring new word vectors from tiny data",
author = "Herbelot, Aur{\'e}lie and
Baroni, Marco",
editor = "Palmer, Martha and
Hwa, Rebecca and
Riedel, Sebastian",
booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/D17-1030/",
doi = "10.18653/v1/D17-1030",
pages = "304--309",
abstract = "Distributional semantics models are known to struggle with small data. It is generally accepted that in order to learn {\textquoteleft}a good vector' for a word, a model must have sufficient examples of its usage. This contradicts the fact that humans can guess the meaning of a word from a few occurrences only. In this paper, we show that a neural language model such as Word2Vec only necessitates minor modifications to its standard architecture to learn new terms from tiny data, using background knowledge from a previously learnt semantic space. We test our model on word definitions and on a nonce task involving 2-6 sentences' worth of context, showing a large increase in performance over state-of-the-art models on the definitional task."
}
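
The abstract describes the method only at a high level: initialize the new word's vector from a frozen, previously learnt background space, then apply aggressive, rapidly decaying updates to that one vector alone. The Python sketch below illustrates that idea under stated assumptions; the toy vocabulary, random background vectors, hyperparameter values, and the simplified positive-only update (real skip-gram training also uses negative sampling) are all illustrative, not the authors' released implementation.

```python
# Minimal sketch of the paper's core idea, NOT the authors' released code:
# keep a pretrained ("background") semantic space frozen and learn only the
# new word's vector from a handful of sentences, starting from a high,
# fast-decaying learning rate ("high-risk" updates).
import numpy as np

rng = np.random.default_rng(0)

# Stand-in background space: word -> 50-d vector, frozen throughout.
# In the paper this would be a previously learnt Word2Vec space.
dim = 50
background = {w: rng.normal(scale=0.1, size=dim)
              for w in "a the of fruit tree yellow long eat sweet is".split()}

def learn_nonce(nonce, sentences, lr=1.0, decay=0.5, epochs=5):
    """Learn a vector for `nonce` from tiny data.

    Initialization: sum of the background vectors of the observed context
    words (a strong prior). Training: skip-gram-like updates that pull the
    nonce vector toward its context words; only the nonce vector moves,
    the background space never changes. `nonce` is kept for clarity only.
    """
    contexts = [[w for w in s.split() if w in background] for s in sentences]
    # Additive initialization from the frozen background space.
    v = sum((background[w] for ctx in contexts for w in ctx),
            start=np.zeros(dim))
    for _ in range(epochs):
        for ctx in contexts:
            for w in ctx:
                c = background[w]
                # Positive-pair sigmoid loss; gradient taken w.r.t. v only
                # (negative sampling omitted for brevity).
                score = 1.0 / (1.0 + np.exp(-v @ c))
                v += lr * (1.0 - score) * c
        lr *= decay  # risky first pass, then rapid damping
    return v

vec = learn_nonce("bananas",
                  ["bananas is a yellow fruit",
                   "eat the sweet long fruit of the tree"])
sim = vec @ background["fruit"] / (np.linalg.norm(vec)
                                   * np.linalg.norm(background["fruit"]))
print(f"cosine(bananas, fruit) = {sim:.3f}")
```

The high initial learning rate is the "high-risk" part of the title: with only a few sentences of evidence, the learner gambles on large steps toward the observed contexts and then damps them quickly before they can overshoot.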