@inproceedings{junczys-dowmunt-grundkiewicz-2017-exploration,
title = "An Exploration of Neural Sequence-to-Sequence Architectures for Automatic Post-Editing",
author = "Junczys-Dowmunt, Marcin and
Grundkiewicz, Roman",
editor = "Kondrak, Greg and
Watanabe, Taro",
booktitle = "Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
month = nov,
year = "2017",
address = "Taipei, Taiwan",
publisher = "Asian Federation of Natural Language Processing",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/I17-1013/",
pages = "120--129",
abstract = "In this work, we explore multiple neural architectures adapted for the task of automatic post-editing of machine translation output. We focus on neural end-to-end models that combine both inputs $mt$ (raw MT output) and $src$ (source language input) in a single neural architecture, modeling $\{mt, src\} \rightarrow pe$ directly. Apart from that, we investigate the influence of hard-attention models which seem to be well-suited for monolingual tasks, as well as combinations of both ideas. We report results on data sets provided during the WMT-2016 shared task on automatic post-editing and can demonstrate that dual-attention models that incorporate all available data in the APE scenario in a single model improve on the best shared task system and on all other published results after the shared task. Dual-attention models that are combined with hard attention remain competitive despite applying fewer changes to the input."
}
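
The dual-attention idea summarized in the abstract can be sketched briefly: at each decoder step the model attends separately over the encoder states of mt (raw MT output) and src (the source sentence) and combines both context vectors before predicting the next post-edited token. The Python sketch below is illustrative only, not the authors' implementation; the dot-product attention, the concatenation of contexts, and all names and dimensions are assumptions.

    # Minimal sketch of a dual-attention decoder step for {mt, src} -> pe.
    # Not the paper's code; simple dot-product attention is assumed.
    import numpy as np

    def softmax(x):
        e = np.exp(x - x.max())
        return e / e.sum()

    def attend(query, states):
        # Score each encoder state against the query, return the weighted sum.
        scores = states @ query          # (T,)
        weights = softmax(scores)        # (T,)
        return weights @ states          # (d,)

    def dual_attention_step(decoder_state, mt_states, src_states):
        # Two separate attention contexts, concatenated for the output layer.
        ctx_mt = attend(decoder_state, mt_states)    # context over MT output
        ctx_src = attend(decoder_state, src_states)  # context over source input
        return np.concatenate([ctx_mt, ctx_src])

    # Toy usage with random "encoder" states of hidden size 4.
    rng = np.random.default_rng(0)
    mt_states = rng.normal(size=(5, 4))   # 5 mt tokens
    src_states = rng.normal(size=(7, 4))  # 7 src tokens
    ctx = dual_attention_step(rng.normal(size=4), mt_states, src_states)
    print(ctx.shape)  # (8,)

In a full model this combined context would feed the decoder's output projection; the hard-attention variants discussed in the paper instead constrain where the decoder may look, which is why they apply fewer changes to the input.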