@inproceedings{paperno-2018-limitations,
title = "Limitations in learning an interpreted language with recurrent models",
author = "Paperno, Denis",
editor = "Linzen, Tal and
Chrupa{\l}a, Grzegorz and
Alishahi, Afra",
booktitle = "Proceedings of the 2018 {EMNLP} Workshop {B}lackbox{NLP}: Analyzing and Interpreting Neural Networks for {NLP}",
month = nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/W18-5456/",
doi = "10.18653/v1/W18-5456",
pages = "384--386",
abstract = "In this submission I report work in progress on learning simplified interpreted languages by means of recurrent models. The data is constructed to reflect core properties of natural language as modeled in formal syntax and semantics. Preliminary results suggest that LSTM networks do generalise to compositional interpretation, albeit only in the most favorable learning setting."
}