@inproceedings{bapna-etal-2018-training,
title = "Training Deeper Neural Machine Translation Models with Transparent Attention",
author = "Bapna, Ankur and
Chen, Mia and
Firat, Orhan and
Cao, Yuan and
Wu, Yonghui",
editor = "Riloff, Ellen and
Chiang, David and
Hockenmaier, Julia and
Tsujii, Jun{'}ichi",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
month = oct # "-" # nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D18-1338/",
doi = "10.18653/v1/D18-1338",
pages = "3028--3033",
abstract = "While current state-of-the-art NMT models, such as RNN seq2seq and Transformers, possess a large number of parameters, they are still shallow in comparison to convolutional models used for both text and vision applications. In this work we attempt to train significantly (2-3x) deeper Transformer and Bi-RNN encoders for machine translation. We propose a simple modification to the attention mechanism that eases the optimization of deeper models, and results in consistent gains of 0.7-1.1 BLEU on the benchmark WMT{'}14 English-German and WMT{'}15 Czech-English tasks for both architectures."
}
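
The abstract's "simple modification to the attention mechanism" refers to transparent attention: as I read the paper, each decoder layer attends to a learned, softmax-normalized combination of all encoder layer outputs (embeddings included) rather than only the top encoder layer, which shortens gradient paths when training much deeper encoders. Below is a minimal sketch under that reading; the module name, tensor shapes, and the einsum-based combination are illustrative assumptions, not the authors' implementation.

```python
# Sketch of transparent attention (my reading of Bapna et al., 2018).
# Each decoder layer gets its own softmax-weighted mix of all encoder
# layer outputs. Names and shapes here are illustrative assumptions.
import torch
import torch.nn as nn


class TransparentAttentionCombiner(nn.Module):
    def __init__(self, num_encoder_layers: int, num_decoder_layers: int):
        super().__init__()
        # One scalar weight per (decoder layer, encoder layer) pair,
        # counting the embedding output as encoder "layer 0".
        self.weights = nn.Parameter(
            torch.zeros(num_decoder_layers, num_encoder_layers + 1)
        )

    def forward(self, encoder_states: list) -> list:
        # encoder_states: list of (batch, src_len, d_model) tensors,
        # one per encoder layer, embeddings first.
        stacked = torch.stack(encoder_states, dim=0)   # (L+1, B, S, D)
        probs = torch.softmax(self.weights, dim=-1)    # (M, L+1)
        # Weighted sum over encoder layers; one combined memory per
        # decoder layer.
        combined = torch.einsum("ml,lbsd->mbsd", probs, stacked)
        return [combined[j] for j in range(combined.size(0))]
```

In a Transformer, such a combiner would sit between the encoder stack and the decoder's cross-attention, so decoder layer j attends over its own mixture of encoder activations instead of a single shared top-layer output.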