@inproceedings{pylypenko-rubino-2018-dfki,
  title     = {{DFKI}-{MLT} System Description for the {WMT}18 Automatic Post-editing Task},
  author    = {Pylypenko, Daria and
               Rubino, Raphael},
  booktitle = {Proceedings of the Third Conference on Machine Translation: Shared Task Papers},
  month     = oct,
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/W18-6469},
  doi       = {10.18653/v1/W18-6469},
  pages     = {836--839},
  abstract  = {This paper presents the Automatic Post-editing (APE) systems submitted by the DFKI-MLT group to the WMT{'}18 APE shared task. Three monolingual neural sequence-to-sequence APE systems were trained using target-language data only: one using an attentional recurrent neural network architecture and two using the attention-only (\textit{transformer}) architecture. The training data was composed of machine translated (MT) output used as source to the APE model aligned with their manually post-edited version or reference translation as target. We made use of the provided training sets only and trained APE models applicable to phrase-based and neural MT outputs. Results show better performances reached by the attention-only model over the recurrent one, significant improvement over the baseline when post-editing phrase-based MT output but degradation when applied to neural MT output.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="pylypenko-rubino-2018-dfki">
<titleInfo>
<title>DFKI-MLT System Description for the WMT18 Automatic Post-editing Task</title>
</titleInfo>
<name type="personal">
<namePart type="given">Daria</namePart>
<namePart type="family">Pylypenko</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Raphael</namePart>
<namePart type="family">Rubino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Third Conference on Machine Translation: Shared Task Papers</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Brussels, Belgium</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper presents the Automatic Post-editing (APE) systems submitted by the DFKI-MLT group to the WMT’18 APE shared task. Three monolingual neural sequence-to-sequence APE systems were trained using target-language data only: one using an attentional recurrent neural network architecture and two using the attention-only (transformer) architecture. The training data was composed of machine translated (MT) output used as source to the APE model aligned with their manually post-edited version or reference translation as target. We made use of the provided training sets only and trained APE models applicable to phrase-based and neural MT outputs. Results show better performances reached by the attention-only model over the recurrent one, significant improvement over the baseline when post-editing phrase-based MT output but degradation when applied to neural MT output.</abstract>
<identifier type="citekey">pylypenko-rubino-2018-dfki</identifier>
<identifier type="doi">10.18653/v1/W18-6469</identifier>
<location>
<url>https://aclanthology.org/W18-6469</url>
</location>
<part>
<date>2018-10</date>
<extent unit="page">
<start>836</start>
<end>839</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T DFKI-MLT System Description for the WMT18 Automatic Post-editing Task
%A Pylypenko, Daria
%A Rubino, Raphael
%S Proceedings of the Third Conference on Machine Translation: Shared Task Papers
%D 2018
%8 oct
%I Association for Computational Linguistics
%C Brussels, Belgium
%F pylypenko-rubino-2018-dfki
%X This paper presents the Automatic Post-editing (APE) systems submitted by the DFKI-MLT group to the WMT’18 APE shared task. Three monolingual neural sequence-to-sequence APE systems were trained using target-language data only: one using an attentional recurrent neural network architecture and two using the attention-only (transformer) architecture. The training data was composed of machine translated (MT) output used as source to the APE model aligned with their manually post-edited version or reference translation as target. We made use of the provided training sets only and trained APE models applicable to phrase-based and neural MT outputs. Results show better performances reached by the attention-only model over the recurrent one, significant improvement over the baseline when post-editing phrase-based MT output but degradation when applied to neural MT output.
%R 10.18653/v1/W18-6469
%U https://aclanthology.org/W18-6469
%U https://doi.org/10.18653/v1/W18-6469
%P 836-839
Markdown (Informal)
[DFKI-MLT System Description for the WMT18 Automatic Post-editing Task](https://aclanthology.org/W18-6469) (Pylypenko & Rubino, 2018)
ACL