@inproceedings{zhang-etal-2025-amia,
  title     = {{AMIA}: Automatic Masking and Joint Intention Analysis Makes {LVLM}s Robust Jailbreak Defenders},
  author    = {Zhang, Yuqi and
               Miao, Yuchun and
               Li, Zuchao and
               Ding, Liang},
  editor    = {Christodoulopoulos, Christos and
               Chakraborty, Tanmoy and
               Rose, Carolyn and
               Peng, Violet},
  booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2025},
  month     = nov,
  year      = {2025},
  address   = {Suzhou, China},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.findings-emnlp.651/},
  doi       = {10.18653/v1/2025.findings-emnlp.651},
  pages     = {12189--12199},
  isbn      = {979-8-89176-335-7},
  abstract  = {We introduce AMIA, a lightweight, inference-only defense for Large Vision{--}Language Models (LVLMs) that (1) Automatically Masks a small set of text-irrelevant image patches to disrupt adversarial perturbations, and (2) conducts joint Intention Analysis to uncover and mitigate hidden harmful intents before response generation. Without any retraining, AMIA improves defense success rates across diverse LVLMs and jailbreak benchmarks from an average of 52.4{\%} to 81.7{\%}, preserves general utility with only a 2{\%} average accuracy drop, and incurs only modest inference overhead. Ablation confirms that both masking and intention analysis are essential for robust safety{--}utility trade-off. Our code will be released.},
}
Markdown (Informal)
[AMIA: Automatic Masking and Joint Intention Analysis Makes LVLMs Robust Jailbreak Defenders](https://aclanthology.org/2025.findings-emnlp.651/) (Zhang et al., Findings of the Association for Computational Linguistics: EMNLP 2025)
ACL