BibTeX
@inproceedings{loula-etal-2018-rearranging,
    title = "Rearranging the Familiar: Testing Compositional Generalization in Recurrent Networks",
    author = "Loula, Jo{\~a}o and
      Baroni, Marco and
      Lake, Brenden",
    booktitle = "Proceedings of the 2018 {EMNLP} Workshop {B}lackbox{NLP}: Analyzing and Interpreting Neural Networks for {NLP}",
    month = nov,
    year = "2018",
    address = "Brussels, Belgium",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W18-5413",
    doi = "10.18653/v1/W18-5413",
    pages = "108--114",
    abstract = "Systematic compositionality is the ability to recombine meaningful units with regular and predictable outcomes, and it{'}s seen as key to the human capacity for generalization in language. Recent work (Lake and Baroni, 2018) has studied systematic compositionality in modern seq2seq models using generalization to novel navigation instructions in a grounded environment as a probing tool. Lake and Baroni{'}s main experiment required the models to quickly bootstrap the meaning of new words. We extend this framework here to settings where the model needs only to recombine well-trained functional words (such as {``}\textit{around}{''} and {``}\textit{right}{''}) in novel contexts. Our findings confirm and strengthen the earlier ones: seq2seq models can be impressively good at generalizing to novel combinations of previously-seen input, but only when they receive extensive training on the specific pattern to be generalized (e.g., generalizing from many examples of {``}X \textit{around right}{''} to {``}\textit{jump around right}{''}), while failing when generalization requires novel application of compositional rules (e.g., inferring the meaning of {``}\textit{around right}{''} from those of {``}\textit{right}{''} and {``}\textit{around}{''}).",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="loula-etal-2018-rearranging">
    <titleInfo>
      <title>Rearranging the Familiar: Testing Compositional Generalization in Recurrent Networks</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">João</namePart>
      <namePart type="family">Loula</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Marco</namePart>
      <namePart type="family">Baroni</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Brenden</namePart>
      <namePart type="family">Lake</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Brussels, Belgium</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Systematic compositionality is the ability to recombine meaningful units with regular and predictable outcomes, and it’s seen as key to the human capacity for generalization in language. Recent work (Lake and Baroni, 2018) has studied systematic compositionality in modern seq2seq models using generalization to novel navigation instructions in a grounded environment as a probing tool. Lake and Baroni’s main experiment required the models to quickly bootstrap the meaning of new words. We extend this framework here to settings where the model needs only to recombine well-trained functional words (such as “around” and “right”) in novel contexts. Our findings confirm and strengthen the earlier ones: seq2seq models can be impressively good at generalizing to novel combinations of previously-seen input, but only when they receive extensive training on the specific pattern to be generalized (e.g., generalizing from many examples of “X around right” to “jump around right”), while failing when generalization requires novel application of compositional rules (e.g., inferring the meaning of “around right” from those of “right” and “around”).</abstract>
    <identifier type="citekey">loula-etal-2018-rearranging</identifier>
    <identifier type="doi">10.18653/v1/W18-5413</identifier>
    <location>
      <url>https://aclanthology.org/W18-5413</url>
    </location>
    <part>
      <date>2018-11</date>
      <extent unit="page">
        <start>108</start>
        <end>114</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Rearranging the Familiar: Testing Compositional Generalization in Recurrent Networks
%A Loula, João
%A Baroni, Marco
%A Lake, Brenden
%S Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP
%D 2018
%8 nov
%I Association for Computational Linguistics
%C Brussels, Belgium
%F loula-etal-2018-rearranging
%X Systematic compositionality is the ability to recombine meaningful units with regular and predictable outcomes, and it’s seen as key to the human capacity for generalization in language. Recent work (Lake and Baroni, 2018) has studied systematic compositionality in modern seq2seq models using generalization to novel navigation instructions in a grounded environment as a probing tool. Lake and Baroni’s main experiment required the models to quickly bootstrap the meaning of new words. We extend this framework here to settings where the model needs only to recombine well-trained functional words (such as “around” and “right”) in novel contexts. Our findings confirm and strengthen the earlier ones: seq2seq models can be impressively good at generalizing to novel combinations of previously-seen input, but only when they receive extensive training on the specific pattern to be generalized (e.g., generalizing from many examples of “X around right” to “jump around right”), while failing when generalization requires novel application of compositional rules (e.g., inferring the meaning of “around right” from those of “right” and “around”).
%R 10.18653/v1/W18-5413
%U https://aclanthology.org/W18-5413
%U https://doi.org/10.18653/v1/W18-5413
%P 108-114
Markdown (Informal)
[Rearranging the Familiar: Testing Compositional Generalization in Recurrent Networks](https://aclanthology.org/W18-5413) (Loula et al., 2018)
ACL
João Loula, Marco Baroni, and Brenden Lake. 2018. Rearranging the Familiar: Testing Compositional Generalization in Recurrent Networks. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 108–114, Brussels, Belgium. Association for Computational Linguistics.