@inproceedings{gainski-balazy-2023-step,
  title     = {Step by Step Loss Goes Very Far: Multi-Step Quantization for Adversarial Text Attacks},
  author    = {Gai{\'n}ski, Piotr and Ba{\l}azy, Klaudia},
  editor    = {Vlachos, Andreas and Augenstein, Isabelle},
  booktitle = {Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics},
  month     = may,
  year      = {2023},
  address   = {Dubrovnik, Croatia},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.eacl-main.149/},
  doi       = {10.18653/v1/2023.eacl-main.149},
  pages     = {2038--2048},
  abstract  = {We propose a novel gradient-based attack against transformer-based language models that searches for an adversarial example in a continuous space of tokens probabilities. Our algorithm mitigates the gap between adversarial loss for continuous and discrete text representations by performing multi-step quantization in a quantization-compensation loop. Experiments show that our method significantly outperforms other approaches on various natural language processing (NLP) tasks.},
}
Markdown (Informal)
[Step by Step Loss Goes Very Far: Multi-Step Quantization for Adversarial Text Attacks](https://aclanthology.org/2023.eacl-main.149/) (Gaiński & Bałazy, EACL 2023)
ACL