@inproceedings{sultana-etal-2025-insight,
  title     = {From Insight to Exploit: Leveraging {LLM} Collaboration for Adaptive Adversarial Text Generation},
  author    = {Sultana, Najrin and
               Rashid, Md Rafi Ur and
               Gu, Kang and
               Mehnaz, Shagufta},
  editor    = {Christodoulopoulos, Christos and
               Chakraborty, Tanmoy and
               Rose, Carolyn and
               Peng, Violet},
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2025},
  month     = nov,
  year      = {2025},
  address   = {Suzhou, China},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.findings-emnlp.1244/},
  doi       = {10.18653/v1/2025.findings-emnlp.1244},
  pages     = {22842--22859},
  isbn      = {979-8-89176-335-7},
  abstract  = {LLMs can provide substantial zero-shot performance on diverse tasks using a simple task prompt, eliminating the need for training or fine-tuning. However, when applying these models to sensitive tasks, it is crucial to thoroughly assess their robustness against adversarial inputs. In this work, we introduce Static Deceptor (StaDec) and Dynamic Deceptor (DyDec), two innovative attack frameworks designed to systematically generate dynamic and adaptive adversarial examples by leveraging the understanding of the LLMs. We produce subtle and natural-looking adversarial inputs that preserve semantic similarity to the original text while effectively deceiving the target LLM. By utilizing an automated, LLM-driven pipeline, we eliminate the dependence on external heuristics. Our attacks evolve with the advancements in LLMs, while demonstrating a strong transferability across models unknown to the attacker. Overall, this work provides a systematic approach for self-assessing the robustness of the LLMs. We release our code and data at https://github.com/Shukti042/AdversarialExample.},
}
Markdown (Informal)
[From Insight to Exploit: Leveraging LLM Collaboration for Adaptive Adversarial Text Generation](https://aclanthology.org/2025.findings-emnlp.1244/) (Sultana et al., Findings 2025)
ACL