@inproceedings{tang-etal-2018-self,
title = "Why Self-Attention? A Targeted Evaluation of Neural Machine Translation Architectures",
author = {Tang, Gongbo and
M{\"u}ller, Mathias and
Rios, Annette and
Sennrich, Rico},
editor = "Riloff, Ellen and
Chiang, David and
Hockenmaier, Julia and
Tsujii, Jun{'}ichi",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
month = oct # "-" # nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/D18-1458/",
doi = "10.18653/v1/D18-1458",
pages = "4263--4272",
abstract = "Recently, non-recurrent architectures (convolutional, self-attentional) have outperformed RNNs in neural machine translation. CNNs and self-attentional networks can connect distant words via shorter network paths than RNNs, and it has been speculated that this improves their ability to model long-range dependencies. However, this theoretical argument has not been tested empirically, nor have alternative explanations for their strong performance been explored in-depth. We hypothesize that the strong performance of CNNs and self-attentional networks could also be due to their ability to extract semantic features from the source text, and we evaluate RNNs, CNNs and self-attention networks on two tasks: subject-verb agreement (where capturing long-range dependencies is required) and word sense disambiguation (where semantic feature extraction is required). Our experimental results show that: 1) self-attentional networks and CNNs do not outperform RNNs in modeling subject-verb agreement over long distances; 2) self-attentional networks perform distinctly better than RNNs and CNNs on word sense disambiguation."
}