@inproceedings{ebrahimi-etal-2018-adversarial,
title = "On Adversarial Examples for Character-Level Neural Machine Translation",
author = "Ebrahimi, Javid and
Lowd, Daniel and
Dou, Dejing",
editor = "Bender, Emily M. and
Derczynski, Leon and
Isabelle, Pierre",
booktitle = "Proceedings of the 27th International Conference on Computational Linguistics",
month = aug,
year = "2018",
address = "Santa Fe, New Mexico, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Add-Cong-Liu-Florida-Atlantic-University-author-id/C18-1055/",
pages = "653--663",
abstract = "Evaluating on adversarial examples has become a standard procedure to measure robustness of deep learning models. Due to the difficulty of creating white-box adversarial examples for discrete text input, most analyses of the robustness of NLP models have been done through black-box adversarial examples. We investigate adversarial examples for character-level neural machine translation (NMT), and contrast black-box adversaries with a novel white-box adversary, which employs differentiable string-edit operations to rank adversarial changes. We propose two novel types of attacks which aim to remove or change a word in a translation, rather than simply break the NMT. We demonstrate that white-box adversarial examples are significantly stronger than their black-box counterparts in different attack scenarios, which show more serious vulnerabilities than previously known. In addition, after performing adversarial training, which takes only 3 times longer than regular training, we can improve the model`s robustness significantly."
}
Markdown (Informal)
[On Adversarial Examples for Character-Level Neural Machine Translation](https://aclanthology.org/C18-1055/) (Ebrahimi et al., COLING 2018)
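
The white-box adversary described in the abstract ranks candidate character edits by a first-order, gradient-based estimate of how much each edit would change the loss. Below is a minimal PyTorch sketch of that ranking step for substitutions only; `model`, `loss_fn`, `one_hot_src`, and `tgt` are illustrative assumptions, not the authors' released code, and the paper's differentiable string-edit operations also cover insertions and deletions.

```python
import torch

def rank_char_flips(model, loss_fn, one_hot_src, tgt):
    """Score every single-character substitution by a first-order
    estimate of the loss change (a hypothetical sketch).

    one_hot_src: (seq_len, vocab_size) one-hot character inputs.
    """
    # Make the one-hot input a leaf tensor so we can take its gradient.
    one_hot_src = one_hot_src.detach().clone().requires_grad_(True)
    loss = loss_fn(model(one_hot_src), tgt)
    loss.backward()
    grad = one_hot_src.grad                       # (seq_len, vocab_size)

    # Estimated loss increase of flipping position i to character j:
    # grad[i, j] - grad[i, current_char(i)].
    current = (one_hot_src * grad).sum(dim=1, keepdim=True)
    scores = grad - current                       # (seq_len, vocab_size)

    # Highest-scoring flip; the score of keeping the current character is 0.
    best = torch.argmax(scores).item()
    pos, char = divmod(best, scores.size(1))
    return pos, char, scores
```

In the attacks the paper describes, a ranking like this would drive a search over multiple edits (e.g., greedily applying the top-scored flip and re-ranking), rather than a single substitution as shown here.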