@inproceedings{wilcox-etal-2018-rnn,
  title     = {What do {RNN} Language Models Learn about Filler{--}Gap Dependencies?},
  author    = {Wilcox, Ethan and
               Levy, Roger and
               Morita, Takashi and
               Futrell, Richard},
  editor    = {Linzen, Tal and
               Chrupa{\l}a, Grzegorz and
               Alishahi, Afra},
  booktitle = {Proceedings of the 2018 {EMNLP} Workshop {BlackboxNLP}: Analyzing and Interpreting Neural Networks for {NLP}},
  month     = nov,
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/W18-5423/},
  doi       = {10.18653/v1/W18-5423},
  pages     = {211--221},
  abstract  = {RNN language models have achieved state-of-the-art perplexity results and have proven useful in a suite of NLP tasks, but it is as yet unclear what syntactic generalizations they learn. Here we investigate whether state-of-the-art RNN language models represent long-distance filler--gap dependencies and constraints on them. Examining RNN behavior on experimentally controlled sentences designed to expose filler--gap dependencies, we show that RNNs can represent the relationship in multiple syntactic positions and over large spans of text. Furthermore, we show that RNNs learn a subset of the known restrictions on filler--gap dependencies, known as island constraints: RNNs show evidence for wh-islands, adjunct islands, and complex NP islands. These studies demonstrates that state-of-the-art RNN models are able to learn and generalize about empty syntactic positions.},
}
Markdown (Informal)
[What do RNN Language Models Learn about Filler–Gap Dependencies?](https://aclanthology.org/W18-5423/) (Wilcox et al., EMNLP 2018)
ACL