@inproceedings{gabriel-etal-2022-naturaladversaries,
title = "{N}atural{A}dversaries: Can Naturalistic Adversaries Be as Effective as Artificial Adversaries?",
author = "Gabriel, Saadia and
Palangi, Hamid and
Choi, Yejin",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.findings-emnlp.413/",
doi = "10.18653/v1/2022.findings-emnlp.413",
pages = "5635--5645",
abstract = "While a substantial body of prior work has explored adversarial example generation for natural language understanding tasks, these examples are often unrealistic and diverge from real-world data distributions. In this work, we introduce a two-stage adversarial example generation framework (NaturalAdversaries) for designing adversaries that are effective at fooling a given classifier and that demonstrate natural-looking failure cases which could plausibly occur during in-the-wild deployment of the models. In the first stage, a token attribution method is used to summarize a given classifier's behavior as a function of the key tokens in the input. In the second stage, a generative model is conditioned on the key tokens from the first stage. NaturalAdversaries is adaptable to both black-box and white-box adversarial attacks based on the level of access to the model parameters. Our results indicate that these adversaries generalize across domains and offer insights for future research on improving the robustness of neural text classification models."
}
Markdown (Informal)
[NaturalAdversaries: Can Naturalistic Adversaries Be as Effective as Artificial Adversaries?](https://aclanthology.org/2022.findings-emnlp.413/) (Gabriel et al., Findings 2022)
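The abstract describes a two-stage recipe: first score which input tokens drive the classifier's prediction, then condition a generative model on those key tokens to produce naturalistic candidates that may still flip the label. The sketch below only illustrates that idea; the occlusion-based attribution, the keyword-prompted GPT-2 generator, and the example model names are assumptions for illustration, not the paper's released implementation.

```python
# Hypothetical sketch of a two-stage "naturalistic adversary" loop.
# Stage 1: occlusion attribution (a stand-in for the paper's attribution method).
# Stage 2: keyword-prompt a generic causal LM, then check for label flips.
import torch
from transformers import (AutoTokenizer, AutoModelForSequenceClassification,
                          AutoModelForCausalLM)

CLF_NAME = "distilbert-base-uncased-finetuned-sst-2-english"  # assumed example classifier
GEN_NAME = "gpt2"                                             # assumed example generator

clf_tok = AutoTokenizer.from_pretrained(CLF_NAME)
clf = AutoModelForSequenceClassification.from_pretrained(CLF_NAME).eval()
gen_tok = AutoTokenizer.from_pretrained(GEN_NAME)
gen = AutoModelForCausalLM.from_pretrained(GEN_NAME).eval()


@torch.no_grad()
def predict(text: str):
    """Return (predicted label, class probabilities) for the classifier."""
    probs = clf(**clf_tok(text, return_tensors="pt")).logits[0].softmax(-1)
    return int(probs.argmax()), probs


@torch.no_grad()
def key_tokens(text: str, k: int = 3):
    """Stage 1 (illustrative): leave-one-out occlusion -- drop each word and
    measure the drop in confidence for the originally predicted class."""
    words = text.split()
    label, probs = predict(text)
    base = probs[label].item()
    scores = []
    for i, word in enumerate(words):
        reduced = " ".join(words[:i] + words[i + 1:])
        _, p = predict(reduced)
        scores.append((base - p[label].item(), word))
    top = [w for _, w in sorted(scores, reverse=True)[:k]]
    return top, label


@torch.no_grad()
def generate_candidates(tokens, n: int = 5):
    """Stage 2 (illustrative): condition a generator on the key tokens via a
    simple keyword prompt and sample fluent candidate sentences."""
    prompt = "Write a sentence using: " + ", ".join(tokens) + "\nSentence:"
    ids = gen_tok(prompt, return_tensors="pt").input_ids
    outs = gen.generate(ids, do_sample=True, top_p=0.9, max_new_tokens=30,
                        num_return_sequences=n,
                        pad_token_id=gen_tok.eos_token_id)
    return [gen_tok.decode(o[ids.shape[1]:], skip_special_tokens=True).strip()
            for o in outs]


if __name__ == "__main__":
    text = "The movie was a complete waste of time."
    tokens, label = key_tokens(text)
    for cand in generate_candidates(tokens):
        new_label, _ = predict(cand)
        if new_label != label:  # a natural-looking input the classifier mislabels
            print("candidate adversary:", cand)
```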