@article{bernardy-lappin-2017-using,
title = "Using Deep Neural Networks to Learn Syntactic Agreement",
author = "Bernardy, Jean-Phillipe and
Lappin, Shalom",
journal = "Linguistic Issues in Language Technology",
volume = "15",
number = "2",
year = "2017",
publisher = "CSLI Publications",
url = "https://preview.aclanthology.org/build-pipeline-with-new-library/2017.lilt-15.3/",
abstract = "We consider the extent to which different deep neural network (DNN) configurations can learn syntactic relations, by taking up Linzen et al.'s (2016) work on subject-verb agreement with LSTM RNNs. We test their methods on a much larger corpus than they used (a ⇠24 million example part of the WaCky corpus, instead of their ⇠1.35 million example corpus, both drawn from Wikipedia). We experiment with several different DNN architectures (LSTM RNNs, GRUs, and CNNs), and alternative parameter settings for these systems (vocabulary size, training to test ratio, number of layers, memory size, drop out rate, and lexical embedding dimension size). We also try out our own unsupervised DNN language model. Our results are broadly compatible with those that Linzen et al. report. However, we discovered some interesting, and in some cases, surprising features of DNNs and language models in their performance of the agreement learning task. In particular, we found that DNNs require large vocabularies to form substantive lexical embeddings in order to learn structural patterns. This finding has interesting consequences for our understanding of the way in which DNNs represent syntactic information. It suggests that DNNs learn syntactic patterns more efficiently through rich lexical embeddings, with semantic as well as syntactic cues, than from training on lexically impoverished strings that highlight structural patterns."
}
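The abstract centers on the Linzen et al. (2016) number-prediction setup: given the words preceding a verb, a recurrent network predicts whether that verb should be singular or plural. Below is a minimal sketch of that task, assuming PyTorch; the class name `AgreementLSTM`, the toy vocabulary, and all dimensions are illustrative assumptions, not the configuration used in the paper.

```python
# Illustrative sketch of the subject-verb agreement prediction task:
# classify the number (singular vs. plural) of an upcoming verb from
# the prefix of the sentence. All names and sizes are assumptions.
import torch
import torch.nn as nn

class AgreementLSTM(nn.Module):
    def __init__(self, vocab_size, embed_dim=50, hidden_dim=50):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
        self.out = nn.Linear(hidden_dim, 2)  # two classes: singular, plural

    def forward(self, token_ids):
        embedded = self.embed(token_ids)       # (batch, seq_len, embed_dim)
        _, (h_n, _) = self.lstm(embedded)      # final hidden state of the LSTM
        return self.out(h_n[-1])               # logits over {singular, plural}

# Toy example: "the keys to the cabinet ___" -> plural verb ("are"),
# despite the intervening singular attractor "cabinet".
vocab = {"the": 0, "keys": 1, "to": 2, "cabinet": 3}
prefix = torch.tensor([[0, 1, 2, 0, 3]])       # batch of one verb prefix
model = AgreementLSTM(vocab_size=len(vocab))
logits = model(prefix)
print(logits.argmax(dim=-1))                   # predicted number class
```

Trained with cross-entropy on corpus-extracted (prefix, verb number) pairs, this is the supervised variant of the task; the paper also compares GRU and CNN encoders and an unsupervised language-model objective.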
Markdown (Informal)
[Using Deep Neural Networks to Learn Syntactic Agreement](https://aclanthology.org/2017.lilt-15.3/) (Bernardy & Lappin, LILT 2017)
ACL
Jean-Philippe Bernardy and Shalom Lappin. 2017. Using Deep Neural Networks to Learn Syntactic Agreement. Linguistic Issues in Language Technology, 15(2).