@inproceedings{kuncoro-etal-2018-lstms,
title = "{LSTM}s Can Learn Syntax-Sensitive Dependencies Well, But Modeling Structure Makes Them Better",
author = "Kuncoro, Adhiguna and
Dyer, Chris and
Hale, John and
Yogatama, Dani and
Clark, Stephen and
Blunsom, Phil",
booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2018",
address = "Melbourne, Australia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P18-1132",
doi = "10.18653/v1/P18-1132",
pages = "1426--1436",
abstract = "Language exhibits hierarchical structure, but recent work using a subject-verb agreement diagnostic argued that state-of-the-art language models, LSTMs, fail to learn long-range syntax sensitive dependencies. Using the same diagnostic, we show that, in fact, LSTMs do succeed in learning such dependencies{---}provided they have enough capacity. We then explore whether models that have access to explicit syntactic information learn agreement more effectively, and how the way in which this structural information is incorporated into the model impacts performance. We find that the mere presence of syntactic information does not improve accuracy, but when model architecture is determined by syntax, number agreement is improved. Further, we find that the choice of how syntactic structure is built affects how well number agreement is learned: top-down construction outperforms left-corner and bottom-up variants in capturing non-local structural dependencies.",
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="kuncoro-etal-2018-lstms">
    <titleInfo>
      <title>LSTMs Can Learn Syntax-Sensitive Dependencies Well, But Modeling Structure Makes Them Better</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Adhiguna</namePart>
      <namePart type="family">Kuncoro</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Chris</namePart>
      <namePart type="family">Dyer</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">John</namePart>
      <namePart type="family">Hale</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Dani</namePart>
      <namePart type="family">Yogatama</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Stephen</namePart>
      <namePart type="family">Clark</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Phil</namePart>
      <namePart type="family">Blunsom</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Melbourne, Australia</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Language exhibits hierarchical structure, but recent work using a subject-verb agreement diagnostic argued that state-of-the-art language models, LSTMs, fail to learn long-range syntax-sensitive dependencies. Using the same diagnostic, we show that, in fact, LSTMs do succeed in learning such dependencies—provided they have enough capacity. We then explore whether models that have access to explicit syntactic information learn agreement more effectively, and how the way in which this structural information is incorporated into the model impacts performance. We find that the mere presence of syntactic information does not improve accuracy, but when model architecture is determined by syntax, number agreement is improved. Further, we find that the choice of how syntactic structure is built affects how well number agreement is learned: top-down construction outperforms left-corner and bottom-up variants in capturing non-local structural dependencies.</abstract>
    <identifier type="citekey">kuncoro-etal-2018-lstms</identifier>
    <identifier type="doi">10.18653/v1/P18-1132</identifier>
    <location>
      <url>https://aclanthology.org/P18-1132</url>
    </location>
    <part>
      <date>2018-07</date>
      <extent unit="page">
        <start>1426</start>
        <end>1436</end>
      </extent>
    </part>
  </mods>
</modsCollection>

%0 Conference Proceedings
%T LSTMs Can Learn Syntax-Sensitive Dependencies Well, But Modeling Structure Makes Them Better
%A Kuncoro, Adhiguna
%A Dyer, Chris
%A Hale, John
%A Yogatama, Dani
%A Clark, Stephen
%A Blunsom, Phil
%S Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2018
%8 jul
%I Association for Computational Linguistics
%C Melbourne, Australia
%F kuncoro-etal-2018-lstms
%X Language exhibits hierarchical structure, but recent work using a subject-verb agreement diagnostic argued that state-of-the-art language models, LSTMs, fail to learn long-range syntax-sensitive dependencies. Using the same diagnostic, we show that, in fact, LSTMs do succeed in learning such dependencies—provided they have enough capacity. We then explore whether models that have access to explicit syntactic information learn agreement more effectively, and how the way in which this structural information is incorporated into the model impacts performance. We find that the mere presence of syntactic information does not improve accuracy, but when model architecture is determined by syntax, number agreement is improved. Further, we find that the choice of how syntactic structure is built affects how well number agreement is learned: top-down construction outperforms left-corner and bottom-up variants in capturing non-local structural dependencies.
%R 10.18653/v1/P18-1132
%U https://aclanthology.org/P18-1132
%U https://doi.org/10.18653/v1/P18-1132
%P 1426-1436

Markdown (Informal)
[LSTMs Can Learn Syntax-Sensitive Dependencies Well, But Modeling Structure Makes Them Better](https://aclanthology.org/P18-1132) (Kuncoro et al., ACL 2018)

ACL
Adhiguna Kuncoro, Chris Dyer, John Hale, Dani Yogatama, Stephen Clark, and Phil Blunsom. 2018. LSTMs Can Learn Syntax-Sensitive Dependencies Well, But Modeling Structure Makes Them Better. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1426–1436, Melbourne, Australia. Association for Computational Linguistics.
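
The abstract refers to the subject-verb agreement diagnostic (introduced by Linzen et al., 2016): a trained language model is asked to assign probabilities to the grammatical and ungrammatical verb forms after a sentence prefix, and an item counts as correct when the grammatical form scores higher. Below is a minimal sketch of just that scoring logic; the toy bigram model, training text, helper names, and test items are illustrative stand-ins, not the paper's LSTM/RNNG models or evaluation data, and real test items include intervening "attractor" nouns (e.g. "the keys to the cabinet ___").

```python
# A minimal sketch (not the authors' code) of the subject-verb agreement
# diagnostic: query a language model for the grammatical vs. ungrammatical
# verb form after a prefix, and count the item correct when the grammatical
# form gets higher probability. The bigram "model" and tiny data below are
# hypothetical stand-ins for the trained LSTMs/RNNGs and Linzen-style items.

from collections import Counter, defaultdict

TRAIN = ("the key is here . the keys are here . "
         "the dog is loud . the dogs are loud .").split()
VOCAB = set(TRAIN)

# Count bigrams to build the stand-in next-word model.
bigrams: dict[str, Counter] = defaultdict(Counter)
for prev, nxt in zip(TRAIN, TRAIN[1:]):
    bigrams[prev][nxt] += 1

def p_next(context_word: str, word: str) -> float:
    """Add-one-smoothed P(word | previous word) under the toy model."""
    counts = bigrams[context_word]
    return (counts[word] + 1) / (sum(counts.values()) + len(VOCAB))

def prefers_grammatical(prefix: list[str], good: str, bad: str) -> bool:
    """The diagnostic itself: does the model rank the correct verb higher?"""
    return p_next(prefix[-1], good) > p_next(prefix[-1], bad)

# (prefix, grammatical verb, ungrammatical verb) test items.
items = [("the keys".split(), "are", "is"),
         ("the dog".split(), "is", "are")]
accuracy = sum(prefers_grammatical(p, g, b) for p, g, b in items) / len(items)
print(f"agreement accuracy: {accuracy:.2f}")
```

In the paper the same comparison is run over thousands of such items with varying numbers of attractors; the sketch only shows how a single item is scored.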