@inproceedings{imamura-sumita-2020-transformer,
title = "Transformer-based Double-token Bidirectional Autoregressive Decoding in Neural Machine Translation",
author = "Imamura, Kenji and
Sumita, Eiichiro",
booktitle = "Proceedings of the 7th Workshop on Asian Translation",
month = dec,
year = "2020",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.wat-1.3",
pages = "50--57",
abstract = "This paper presents a simple method that extends a standard Transformer-based autoregressive decoder, to speed up decoding. The proposed method generates a token from the head and tail of a sentence (two tokens in total) in each step. By simultaneously generating multiple tokens that rarely depend on each other, the decoding speed is increased while the degradation in translation quality is minimized. In our experiments, the proposed method increased the translation speed by around 113{\%}-155{\%} in comparison with a standard autoregressive decoder, while degrading the BLEU scores by no more than 1.03. It was faster than an iterative non-autoregressive decoder in many conditions.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="imamura-sumita-2020-transformer">
<titleInfo>
<title>Transformer-based Double-token Bidirectional Autoregressive Decoding in Neural Machine Translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kenji</namePart>
<namePart type="family">Imamura</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eiichiro</namePart>
<namePart type="family">Sumita</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued encoding="w3cdtf">2020-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 7th Workshop on Asian Translation</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper presents a simple method that extends a standard Transformer-based autoregressive decoder, to speed up decoding. The proposed method generates a token from the head and tail of a sentence (two tokens in total) in each step. By simultaneously generating multiple tokens that rarely depend on each other, the decoding speed is increased while the degradation in translation quality is minimized. In our experiments, the proposed method increased the translation speed by around 113%-155% in comparison with a standard autoregressive decoder, while degrading the BLEU scores by no more than 1.03. It was faster than an iterative non-autoregressive decoder in many conditions.</abstract>
<identifier type="citekey">imamura-sumita-2020-transformer</identifier>
<location>
<url>https://aclanthology.org/2020.wat-1.3</url>
</location>
<part>
<date>2020-12</date>
<extent unit="page">
<start>50</start>
<end>57</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Transformer-based Double-token Bidirectional Autoregressive Decoding in Neural Machine Translation
%A Imamura, Kenji
%A Sumita, Eiichiro
%S Proceedings of the 7th Workshop on Asian Translation
%D 2020
%8 dec
%I Association for Computational Linguistics
%C Suzhou, China
%F imamura-sumita-2020-transformer
%X This paper presents a simple method that extends a standard Transformer-based autoregressive decoder, to speed up decoding. The proposed method generates a token from the head and tail of a sentence (two tokens in total) in each step. By simultaneously generating multiple tokens that rarely depend on each other, the decoding speed is increased while the degradation in translation quality is minimized. In our experiments, the proposed method increased the translation speed by around 113%-155% in comparison with a standard autoregressive decoder, while degrading the BLEU scores by no more than 1.03. It was faster than an iterative non-autoregressive decoder in many conditions.
%U https://aclanthology.org/2020.wat-1.3
%P 50-57
Markdown (Informal)
[Transformer-based Double-token Bidirectional Autoregressive Decoding in Neural Machine Translation](https://aclanthology.org/2020.wat-1.3) (Imamura & Sumita, WAT 2020)
ACL