@inproceedings{tu-etal-2017-learning,
title = "Learning to Embed Words in Context for Syntactic Tasks",
author = "Tu, Lifu and
Gimpel, Kevin and
Livescu, Karen",
editor = "Blunsom, Phil and
Bordes, Antoine and
Cho, Kyunghyun and
Cohen, Shay and
Dyer, Chris and
Grefenstette, Edward and
Hermann, Karl Moritz and
Rimell, Laura and
Weston, Jason and
Yih, Scott",
booktitle = "Proceedings of the 2nd Workshop on Representation Learning for {NLP}",
month = aug,
year = "2017",
address = "Vancouver, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/W17-2632/",
doi = "10.18653/v1/W17-2632",
pages = "265--275",
abstract = "We present models for embedding words in the context of surrounding words. Such models, which we refer to as token embeddings, represent the characteristics of a word that are specific to a given context, such as word sense, syntactic category, and semantic role. We explore simple, efficient token embedding models based on standard neural network architectures. We learn token embeddings on a large amount of unannotated text and evaluate them as features for part-of-speech taggers and dependency parsers trained on much smaller amounts of annotated data. We find that predictors endowed with token embeddings consistently outperform baseline predictors across a range of context window and training set sizes."
}
Markdown (Informal)
[Learning to Embed Words in Context for Syntactic Tasks](https://aclanthology.org/W17-2632/) (Tu et al., RepL4NLP 2017)
ACL
Lifu Tu, Kevin Gimpel, and Karen Livescu. 2017. Learning to Embed Words in Context for Syntactic Tasks. In Proceedings of the 2nd Workshop on Representation Learning for NLP, pages 265–275, Vancouver, Canada. Association for Computational Linguistics.
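
As a rough sketch of the token-embedding idea described in the abstract: a context encoder reads pretrained type embeddings over a small window around a target token and emits a representation of that word in that particular context, which can then be fed as a feature to a POS tagger or dependency parser. The paper explores several simple, efficient architectures; the bidirectional LSTM encoder, layer sizes, and all names below (TokenEmbedder and so on) are illustrative assumptions, not the authors' exact model or training objective.

# Minimal, self-contained sketch of a context-window token embedder.
# Assumes a BiLSTM encoder; the paper evaluates standard architectures,
# and this is only one plausible instantiation.
import torch
import torch.nn as nn

class TokenEmbedder(nn.Module):
    def __init__(self, vocab_size, type_dim=100, token_dim=100):
        super().__init__()
        # Pretrained type embeddings would normally be loaded here;
        # random initialization keeps the sketch runnable on its own.
        self.type_emb = nn.Embedding(vocab_size, type_dim)
        self.bilstm = nn.LSTM(type_dim, token_dim // 2,
                              batch_first=True, bidirectional=True)

    def forward(self, window_ids, center):
        # window_ids: (batch, window_len) word ids around each target token.
        # center: index of the target token within the window.
        states, _ = self.bilstm(self.type_emb(window_ids))
        # The hidden state at the target position serves as its token
        # embedding: a context-specific representation of the word.
        return states[:, center, :]

embedder = TokenEmbedder(vocab_size=10000)
window = torch.randint(0, 10000, (4, 5))  # batch of four 5-word windows
tok = embedder(window, center=2)          # (4, 100) token embeddings
print(tok.shape)

In a downstream setup like the one the abstract describes, these vectors would be concatenated with a predictor's existing features, so that taggers and parsers trained on small annotated sets can benefit from token embeddings learned on large unannotated text.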