@inproceedings{wang-etal-2022-foiling,
title = "Foiling Training-Time Attacks on Neural Machine Translation Systems",
author = "Wang, Jun and
He, Xuanli and
Rubinstein, Benjamin I. P. and
Cohn, Trevor",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2022.findings-emnlp.435/",
doi = "10.18653/v1/2022.findings-emnlp.435",
pages = "5906--5913",
abstract = "Neural machine translation (NMT) systems are vulnerable to backdoor attacks, whereby an attacker injects poisoned samples into training such that a trained model produces malicious translations. Nevertheless, there is little research on defending against such backdoor attacks in NMT. In this paper, we first show that backdoor attacks that have been successful in text classification are also effective against machine translation tasks. We then present a novel defence method that exploits a key property of most backdoor attacks: namely the asymmetry between the source and target language sentences, which is used to facilitate malicious text insertions, substitutions and suchlike. Our technique uses word alignment coupled with language model scoring to detect outlier tokens, and thus can find and filter out training instances which may contain backdoors. Experimental results demonstrate that our technique can significantly reduce the success of various attacks by up to 89.0{\%}, while not affecting predictive accuracy."
}
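The abstract sketches the defence at a high level: align source and target tokens, score target tokens with a language model, and filter training pairs whose target side contains unaligned, low-probability outliers. The snippet below is an illustrative sketch of that idea only, not the authors' released code; the `align` and `lm_logprob` callables (and the toy stand-ins for them) are hypothetical placeholders for a real word aligner and a pretrained language model, and the thresholds are arbitrary.

```python
# Illustrative sketch: drop parallel training pairs whose target side contains
# tokens that are (a) unaligned to any source token and (b) improbable under a
# target-side language model. The aligner and LM scorer are hypothetical
# stand-ins; a real defence would plug in a proper word aligner and LM.

from typing import Callable, List, Set, Tuple

Alignment = Set[Tuple[int, int]]  # (source index, target index) pairs


def suspicious_token_count(
    src_tokens: List[str],
    tgt_tokens: List[str],
    align: Callable[[List[str], List[str]], Alignment],
    lm_logprob: Callable[[List[str], int], float],
    logprob_threshold: float = -8.0,
) -> int:
    """Count target tokens with no aligned source token and a low LM score."""
    aligned_tgt = {j for _, j in align(src_tokens, tgt_tokens)}
    count = 0
    for j in range(len(tgt_tokens)):
        if j not in aligned_tgt and lm_logprob(tgt_tokens, j) < logprob_threshold:
            count += 1
    return count


def filter_training_pairs(
    pairs: List[Tuple[List[str], List[str]]],
    align: Callable[[List[str], List[str]], Alignment],
    lm_logprob: Callable[[List[str], int], float],
    max_suspicious: int = 0,
) -> List[Tuple[List[str], List[str]]]:
    """Keep only pairs whose target side has at most `max_suspicious` outliers."""
    return [
        (src, tgt)
        for src, tgt in pairs
        if suspicious_token_count(src, tgt, align, lm_logprob) <= max_suspicious
    ]


if __name__ == "__main__":
    # Toy stand-ins so the sketch runs end to end.
    def toy_align(src: List[str], tgt: List[str]) -> Alignment:
        # Pretend aligner: match tokens position-by-position up to the shorter length.
        return {(i, i) for i in range(min(len(src), len(tgt)))}

    def toy_lm_logprob(tgt: List[str], j: int) -> float:
        # Pretend LM: common words are likely, anything else looks like an outlier.
        common = {"the", "cat", "sat", "on", "mat", "."}
        return -2.0 if tgt[j].lower() in common else -12.0

    data = [
        (["die", "katze", "sass", "auf", "der", "matte"],
         ["the", "cat", "sat", "on", "the", "mat"]),
        (["die", "katze", "sass", "auf", "der", "matte"],
         ["the", "cat", "sat", "on", "the", "mat", "visit", "evil.example"]),
    ]
    kept = filter_training_pairs(data, toy_align, toy_lm_logprob)
    print(f"kept {len(kept)} of {len(data)} pairs")  # -> kept 1 of 2 pairs
```

In this toy run the second pair is discarded because its two appended target tokens have no aligned source counterpart and score poorly under the stand-in language model, mirroring the asymmetry the abstract describes.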