@inproceedings{yang-etal-2020-robust,
title = "Robust and Interpretable Grounding of Spatial References with Relation Networks",
author = "Yang, Tsung-Yen and
Lan, Andrew and
Narasimhan, Karthik",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.findings-emnlp.172",
doi = "10.18653/v1/2020.findings-emnlp.172",
pages = "1908--1923",
abstract = "Learning representations of spatial references in natural language is a key challenge in tasks like autonomous navigation and robotic manipulation. Recent work has investigated various neural architectures for learning multi-modal representations for spatial concepts. However, the lack of explicit reasoning over entities makes such approaches vulnerable to noise in input text or state observations. In this paper, we develop effective models for understanding spatial references in text that are robust and interpretable, without sacrificing performance. We design a text-conditioned relation network whose parameters are dynamically computed with a cross-modal attention module to capture fine-grained spatial relations between entities. This design choice provides interpretability of learned intermediate outputs. Experiments across three tasks demonstrate that our model achieves superior performance, with a 17{\%} improvement in predicting goal locations and a 15{\%} improvement in robustness compared to state-of-the-art systems.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="yang-etal-2020-robust">
    <titleInfo>
      <title>Robust and Interpretable Grounding of Spatial References with Relation Networks</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Tsung-Yen</namePart>
      <namePart type="family">Yang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Andrew</namePart>
      <namePart type="family">Lan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Karthik</namePart>
      <namePart type="family">Narasimhan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2020</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Learning representations of spatial references in natural language is a key challenge in tasks like autonomous navigation and robotic manipulation. Recent work has investigated various neural architectures for learning multi-modal representations for spatial concepts. However, the lack of explicit reasoning over entities makes such approaches vulnerable to noise in input text or state observations. In this paper, we develop effective models for understanding spatial references in text that are robust and interpretable, without sacrificing performance. We design a text-conditioned relation network whose parameters are dynamically computed with a cross-modal attention module to capture fine-grained spatial relations between entities. This design choice provides interpretability of learned intermediate outputs. Experiments across three tasks demonstrate that our model achieves superior performance, with a 17% improvement in predicting goal locations and a 15% improvement in robustness compared to state-of-the-art systems.</abstract>
    <identifier type="citekey">yang-etal-2020-robust</identifier>
    <identifier type="doi">10.18653/v1/2020.findings-emnlp.172</identifier>
    <location>
      <url>https://aclanthology.org/2020.findings-emnlp.172</url>
    </location>
    <part>
      <date>2020-11</date>
      <extent unit="page">
        <start>1908</start>
        <end>1923</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Robust and Interpretable Grounding of Spatial References with Relation Networks
%A Yang, Tsung-Yen
%A Lan, Andrew
%A Narasimhan, Karthik
%S Findings of the Association for Computational Linguistics: EMNLP 2020
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F yang-etal-2020-robust
%X Learning representations of spatial references in natural language is a key challenge in tasks like autonomous navigation and robotic manipulation. Recent work has investigated various neural architectures for learning multi-modal representations for spatial concepts. However, the lack of explicit reasoning over entities makes such approaches vulnerable to noise in input text or state observations. In this paper, we develop effective models for understanding spatial references in text that are robust and interpretable, without sacrificing performance. We design a text-conditioned relation network whose parameters are dynamically computed with a cross-modal attention module to capture fine-grained spatial relations between entities. This design choice provides interpretability of learned intermediate outputs. Experiments across three tasks demonstrate that our model achieves superior performance, with a 17% improvement in predicting goal locations and a 15% improvement in robustness compared to state-of-the-art systems.
%R 10.18653/v1/2020.findings-emnlp.172
%U https://aclanthology.org/2020.findings-emnlp.172
%U https://doi.org/10.18653/v1/2020.findings-emnlp.172
%P 1908-1923
Markdown (Informal)
[Robust and Interpretable Grounding of Spatial References with Relation Networks](https://aclanthology.org/2020.findings-emnlp.172) (Yang et al., Findings 2020)
ACL
Tsung-Yen Yang, Andrew Lan, and Karthik Narasimhan. 2020. [Robust and Interpretable Grounding of Spatial References with Relation Networks](https://aclanthology.org/2020.findings-emnlp.172). In *Findings of the Association for Computational Linguistics: EMNLP 2020*, pages 1908–1923, Online. Association for Computational Linguistics.
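
The abstract describes a text-conditioned relation network whose parameters are generated on the fly by a cross-modal attention module reading the instruction text. Below is a minimal PyTorch sketch of that general idea; the class name, dimensions, attention scheme, hypernetwork design, sum-pooling readout, and the (x, y) goal head are all illustrative assumptions, not the authors' implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TextConditionedRelationNet(nn.Module):
    """Relation network whose per-pair relation weights are generated from text."""

    def __init__(self, d_ent=64, d_txt=64, d_rel=64):
        super().__init__()
        self.d_rel = d_rel
        self.query = nn.Linear(2 * d_ent, d_txt)   # entity pair -> attention query
        # Hypernetwork: attended text vector -> weights/bias of the relation map.
        self.w_gen = nn.Linear(d_txt, 2 * d_ent * d_rel)
        self.b_gen = nn.Linear(d_txt, d_rel)
        self.readout = nn.Linear(d_rel, 2)         # e.g. a predicted (x, y) goal location

    def forward(self, entities, text):
        # entities: (B, N, d_ent) entity embeddings; text: (B, T, d_txt) token embeddings.
        B, N, d = entities.shape
        # All ordered entity pairs: (B, N*N, 2*d_ent).
        left = entities.unsqueeze(2).expand(B, N, N, d)
        right = entities.unsqueeze(1).expand(B, N, N, d)
        pairs = torch.cat([left, right], dim=-1).reshape(B, N * N, 2 * d)
        # Cross-modal attention: each entity pair queries the instruction tokens.
        q = self.query(pairs)                                        # (B, N*N, d_txt)
        attn = torch.softmax(q @ text.transpose(1, 2) / q.shape[-1] ** 0.5, dim=-1)
        ctx = attn @ text                                            # (B, N*N, d_txt)
        # Dynamically computed relation weights: one weight matrix per pair.
        W = self.w_gen(ctx).reshape(B, N * N, self.d_rel, 2 * d)
        b = self.b_gen(ctx)
        rel = F.relu(torch.einsum('bprd,bpd->bpr', W, pairs) + b)
        # Standard relation-network aggregation: sum over pairs, then read out.
        return self.readout(rel.sum(dim=1))

# Usage: batch of 2 scenes, 5 entities each, 12-token instructions.
model = TextConditionedRelationNet()
out = model(torch.randn(2, 5, 64), torch.randn(2, 12, 64))
print(out.shape)  # torch.Size([2, 2])
```

In this sketch the per-pair attention map over instruction tokens is an inspectable intermediate output, which is consistent with the interpretability claim in the abstract, though the paper's actual architecture and diagnostics may differ.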