@inproceedings{indurthi-etal-2019-look,
title = "Look Harder: A Neural Machine Translation Model with Hard Attention",
author = "Indurthi, Sathish Reddy and
Chung, Insoo and
Kim, Sangha",
booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P19-1290",
doi = "10.18653/v1/P19-1290",
pages = "3037--3043",
abstract = "Soft-attention based Neural Machine Translation (NMT) models have achieved promising results on several translation tasks. These models attend all the words in the source sequence for each target token, which makes them ineffective for long sequence translation. In this work, we propose a hard-attention based NMT model which selects a subset of source tokens for each target token to effectively handle long sequence translation. Due to the discrete nature of the hard-attention mechanism, we design a reinforcement learning algorithm coupled with reward shaping strategy to efficiently train it. Experimental results show that the proposed model performs better on long sequences and thereby achieves significant BLEU score improvement on English-German (EN-DE) and English-French (ENFR) translation tasks compared to the soft attention based NMT.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="indurthi-etal-2019-look">
<titleInfo>
<title>Look Harder: A Neural Machine Translation Model with Hard Attention</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sathish</namePart>
<namePart type="given">Reddy</namePart>
<namePart type="family">Indurthi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Insoo</namePart>
<namePart type="family">Chung</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sangha</namePart>
<namePart type="family">Kim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-jul</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Florence, Italy</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Soft-attention based Neural Machine Translation (NMT) models have achieved promising results on several translation tasks. These models attend to all the words in the source sequence for each target token, which makes them ineffective for long sequence translation. In this work, we propose a hard-attention based NMT model which selects a subset of source tokens for each target token to effectively handle long sequence translation. Due to the discrete nature of the hard-attention mechanism, we design a reinforcement learning algorithm coupled with a reward shaping strategy to efficiently train it. Experimental results show that the proposed model performs better on long sequences and thereby achieves significant BLEU score improvement on English-German (EN-DE) and English-French (EN-FR) translation tasks compared to the soft-attention based NMT.</abstract>
<identifier type="citekey">indurthi-etal-2019-look</identifier>
<identifier type="doi">10.18653/v1/P19-1290</identifier>
<location>
<url>https://aclanthology.org/P19-1290</url>
</location>
<part>
<date>2019-jul</date>
<extent unit="page">
<start>3037</start>
<end>3043</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Look Harder: A Neural Machine Translation Model with Hard Attention
%A Indurthi, Sathish Reddy
%A Chung, Insoo
%A Kim, Sangha
%S Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics
%D 2019
%8 jul
%I Association for Computational Linguistics
%C Florence, Italy
%F indurthi-etal-2019-look
%X Soft-attention based Neural Machine Translation (NMT) models have achieved promising results on several translation tasks. These models attend to all the words in the source sequence for each target token, which makes them ineffective for long sequence translation. In this work, we propose a hard-attention based NMT model which selects a subset of source tokens for each target token to effectively handle long sequence translation. Due to the discrete nature of the hard-attention mechanism, we design a reinforcement learning algorithm coupled with a reward shaping strategy to efficiently train it. Experimental results show that the proposed model performs better on long sequences and thereby achieves significant BLEU score improvement on English-German (EN-DE) and English-French (EN-FR) translation tasks compared to the soft-attention based NMT.
%R 10.18653/v1/P19-1290
%U https://aclanthology.org/P19-1290
%U https://doi.org/10.18653/v1/P19-1290
%P 3037-3043
Markdown (Informal)
[Look Harder: A Neural Machine Translation Model with Hard Attention](https://aclanthology.org/P19-1290) (Indurthi et al., ACL 2019)
ACL
Sathish Reddy Indurthi, Insoo Chung, and Sangha Kim. 2019. Look Harder: A Neural Machine Translation Model with Hard Attention. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3037–3043, Florence, Italy. Association for Computational Linguistics.
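The abstract above describes replacing soft attention over all source tokens with a hard selection of a subset of source tokens per target token. As a purely illustrative, hypothetical sketch of that contrast (top-k selection, dot-product scoring, and the value of k are assumptions for illustration, not the paper's method; the reinforcement-learning training with reward shaping is not reproduced here):

```python
# Illustrative sketch only (not the authors' code): soft attention over all
# source states vs. hard attention restricted to a top-k subset of them.
import numpy as np

def soft_attention(query, source_states):
    """Standard soft attention: weight every source token."""
    scores = source_states @ query                 # (src_len,)
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()                       # softmax over all tokens
    return weights @ source_states                 # context vector

def hard_attention_topk(query, source_states, k=3):
    """Hard attention sketch: keep only the k highest-scoring tokens."""
    scores = source_states @ query                 # (src_len,)
    top_idx = np.argsort(scores)[-k:]              # discrete subset selection
    sub = scores[top_idx]
    weights = np.exp(sub - sub.max())
    weights /= weights.sum()                       # softmax over the subset only
    return weights @ source_states[top_idx]        # context from k tokens

# Toy usage: one decoder query against a long source sequence of 50 states.
rng = np.random.default_rng(0)
src = rng.normal(size=(50, 8))                     # 50 source states, dim 8
q = rng.normal(size=8)                             # one target-side query
print(soft_attention(q, src).shape, hard_attention_topk(q, src).shape)
```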