@inproceedings{brarda-etal-2017-sequential,
title = "Sequential Attention: A Context-Aware Alignment Function for Machine Reading",
author = "Brarda, Sebastian and
Yeres, Philip and
Bowman, Samuel",
booktitle = "Proceedings of the 2nd Workshop on Representation Learning for {NLP}",
month = aug,
year = "2017",
address = "Vancouver, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-2610",
doi = "10.18653/v1/W17-2610",
pages = "75--80",
abstract = "In this paper we propose a neural network model with a novel Sequential Attention layer that extends soft attention by assigning weights to words in an input sequence in a way that takes into account not just how well that word matches a query, but how well surrounding words match. We evaluate this approach on the task of reading comprehension (on the Who did What and CNN datasets) and show that it dramatically improves a strong baseline{---}the Stanford Reader{---}and is competitive with the state of the art.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="brarda-etal-2017-sequential">
<titleInfo>
<title>Sequential Attention: A Context-Aware Alignment Function for Machine Reading</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sebastian</namePart>
<namePart type="family">Brarda</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Philip</namePart>
<namePart type="family">Yeres</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Samuel</namePart>
<namePart type="family">Bowman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2017-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2nd Workshop on Representation Learning for NLP</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vancouver, Canada</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this paper we propose a neural network model with a novel Sequential Attention layer that extends soft attention by assigning weights to words in an input sequence in a way that takes into account not just how well that word matches a query, but how well surrounding words match. We evaluate this approach on the task of reading comprehension (on the Who did What and CNN datasets) and show that it dramatically improves a strong baseline—the Stanford Reader—and is competitive with the state of the art.</abstract>
<identifier type="citekey">brarda-etal-2017-sequential</identifier>
<identifier type="doi">10.18653/v1/W17-2610</identifier>
<location>
<url>https://aclanthology.org/W17-2610</url>
</location>
<part>
<date>2017-08</date>
<extent unit="page">
<start>75</start>
<end>80</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Sequential Attention: A Context-Aware Alignment Function for Machine Reading
%A Brarda, Sebastian
%A Yeres, Philip
%A Bowman, Samuel
%S Proceedings of the 2nd Workshop on Representation Learning for NLP
%D 2017
%8 aug
%I Association for Computational Linguistics
%C Vancouver, Canada
%F brarda-etal-2017-sequential
%X In this paper we propose a neural network model with a novel Sequential Attention layer that extends soft attention by assigning weights to words in an input sequence in a way that takes into account not just how well that word matches a query, but how well surrounding words match. We evaluate this approach on the task of reading comprehension (on the Who did What and CNN datasets) and show that it dramatically improves a strong baseline—the Stanford Reader—and is competitive with the state of the art.
%R 10.18653/v1/W17-2610
%U https://aclanthology.org/W17-2610
%U https://doi.org/10.18653/v1/W17-2610
%P 75-80
Markdown (Informal)
[Sequential Attention: A Context-Aware Alignment Function for Machine Reading](https://aclanthology.org/W17-2610) (Brarda et al., 2017)
ACL