@inproceedings{pylypenko-rubino-2018-dfki,
title = "{DFKI}-{MLT} System Description for the {WMT}18 Automatic Post-editing Task",
author = "Pylypenko, Daria and
Rubino, Raphael",
editor = "Bojar, Ond{\v{r}}ej and
Chatterjee, Rajen and
Federmann, Christian and
Fishel, Mark and
Graham, Yvette and
Haddow, Barry and
Huck, Matthias and
Yepes, Antonio Jimeno and
Koehn, Philipp and
Monz, Christof and
Negri, Matteo and
N{\'e}v{\'e}ol, Aur{\'e}lie and
Neves, Mariana and
Post, Matt and
Specia, Lucia and
Turchi, Marco and
Verspoor, Karin",
booktitle = "Proceedings of the Third Conference on Machine Translation: Shared Task Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/W18-6469/",
doi = "10.18653/v1/W18-6469",
pages = "836--839",
abstract = "This paper presents the Automatic Post-editing (APE) systems submitted by the DFKI-MLT group to the WMT`18 APE shared task. Three monolingual neural sequence-to-sequence APE systems were trained using target-language data only: one using an attentional recurrent neural network architecture and two using the attention-only (\textit{transformer}) architecture. The training data was composed of machine translated (MT) output used as source to the APE model aligned with their manually post-edited version or reference translation as target. We made use of the provided training sets only and trained APE models applicable to phrase-based and neural MT outputs. Results show better performances reached by the attention-only model over the recurrent one, significant improvement over the baseline when post-editing phrase-based MT output but degradation when applied to neural MT output."
}