@inproceedings{li-etal-2025-diffusedef,
  title     = {{DiffuseDef}: Improved Robustness to Adversarial Attacks via Iterative Denoising},
  author    = {Li, Zhenhao and Zhou, Huichi and Rei, Marek and Specia, Lucia},
  editor    = {Che, Wanxiang and Nabende, Joyce and Shutova, Ekaterina and Pilehvar, Mohammad Taher},
  booktitle = {Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.acl-long.454/},
  pages     = {9259--9274},
  isbn      = {979-8-89176-251-0},
  abstract  = {Pretrained language models have significantly advanced performance across various natural language processing tasks. However, adversarial attacks continue to pose a critical challenge to system built using these models, as they can be exploited with carefully crafted adversarial texts. Inspired by the ability of diffusion models to predict and reduce noise in computer vision, we propose a novel and flexible adversarial defense method for language classification tasks, DiffuseDef, which incorporates a diffusion layer as a denoiser between the encoder and the classifier. The diffusion layer is trained on top of the existing classifier, ensuring seamless integration with any model in a plug-and-play manner. During inference, the adversarial hidden state is first combined with sampled noise, then denoised iteratively and finally ensembled to produce a robust text representation. By integrating adversarial training, denoising, and ensembling techniques, we show that DiffuseDef improves over existing adversarial defense methods and achieves state-of-the-art performance against common black-box and white-box adversarial attacks.},
}
Markdown (Informal)
[DiffuseDef: Improved Robustness to Adversarial Attacks via Iterative Denoising](https://aclanthology.org/2025.acl-long.454/) (Li et al., ACL 2025)
ACL