@inproceedings{libovicky-etal-2018-input,
    title = "Input Combination Strategies for Multi-Source Transformer Decoder",
    author = "Libovick{\'y}, Jind{\v{r}}ich and
      Helcl, Jind{\v{r}}ich and
      Mare{\v{c}}ek, David",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Brussels, Belgium",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W18-6326",
    doi = "10.18653/v1/W18-6326",
    pages = "253--260",
    abstract = "In multi-source sequence-to-sequence tasks, the attention mechanism can be modeled in several ways. This topic has been thoroughly studied on recurrent architectures. In this paper, we extend the previous work to the encoder-decoder attention in the Transformer architecture. We propose four different input combination strategies for the encoder-decoder attention: serial, parallel, flat, and hierarchical. We evaluate our methods on tasks of multimodal translation and translation with multiple source languages. The experiments show that the models are able to use multiple sources and improve over single source baselines.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="libovicky-etal-2018-input">
    <titleInfo>
      <title>Input Combination Strategies for Multi-Source Transformer Decoder</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Jindřich</namePart>
      <namePart type="family">Libovický</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jindřich</namePart>
      <namePart type="family">Helcl</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">David</namePart>
      <namePart type="family">Mareček</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-oct</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Third Conference on Machine Translation: Research Papers</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Brussels, Belgium</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In multi-source sequence-to-sequence tasks, the attention mechanism can be modeled in several ways. This topic has been thoroughly studied on recurrent architectures. In this paper, we extend the previous work to the encoder-decoder attention in the Transformer architecture. We propose four different input combination strategies for the encoder-decoder attention: serial, parallel, flat, and hierarchical. We evaluate our methods on tasks of multimodal translation and translation with multiple source languages. The experiments show that the models are able to use multiple sources and improve over single source baselines.</abstract>
    <identifier type="citekey">libovicky-etal-2018-input</identifier>
    <identifier type="doi">10.18653/v1/W18-6326</identifier>
    <location>
      <url>https://aclanthology.org/W18-6326</url>
    </location>
    <part>
      <date>2018-oct</date>
      <extent unit="page">
        <start>253</start>
        <end>260</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Input Combination Strategies for Multi-Source Transformer Decoder
%A Libovický, Jindřich
%A Helcl, Jindřich
%A Mareček, David
%S Proceedings of the Third Conference on Machine Translation: Research Papers
%D 2018
%8 oct
%I Association for Computational Linguistics
%C Brussels, Belgium
%F libovicky-etal-2018-input
%X In multi-source sequence-to-sequence tasks, the attention mechanism can be modeled in several ways. This topic has been thoroughly studied on recurrent architectures. In this paper, we extend the previous work to the encoder-decoder attention in the Transformer architecture. We propose four different input combination strategies for the encoder-decoder attention: serial, parallel, flat, and hierarchical. We evaluate our methods on tasks of multimodal translation and translation with multiple source languages. The experiments show that the models are able to use multiple sources and improve over single source baselines.
%R 10.18653/v1/W18-6326
%U https://aclanthology.org/W18-6326
%U https://doi.org/10.18653/v1/W18-6326
%P 253-260
Markdown (Informal)
[Input Combination Strategies for Multi-Source Transformer Decoder](https://aclanthology.org/W18-6326) (Libovický et al., 2018)
ACL
Jindřich Libovický, Jindřich Helcl, and David Mareček. 2018. [Input Combination Strategies for Multi-Source Transformer Decoder](https://aclanthology.org/W18-6326). In *Proceedings of the Third Conference on Machine Translation: Research Papers*, pages 253–260, Brussels, Belgium. Association for Computational Linguistics.