@inproceedings{chen-etal-2026-zer0,
title = "Zer0-Jack: A memory-efficient gradient-based jailbreaking method for black box Multi-modal Large Language Models",
author = "Chen, Tiejin and
Wang, Kaishen and
Wei, Hua",
editor = "Demberg, Vera and
Inui, Kentaro and
Marquez, Llu{\'i}s",
booktitle = "Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 1: Long Papers)",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.202/",
pages = "4328--4344",
ISBN = "979-8-89176-380-7",
abstract = "Multi-modal large language models (MLLMs) have recently shown impressive capabilities but are also highly vulnerable to jailbreak attacks. While white-box methods can generate adversarial visual inputs via gradient-based optimization, such approaches fail in realistic black-box settings where model parameters are inaccessible. Zeroth-order (ZO) optimization offers a natural path for black-box attacks by estimating gradients from queries, yet its application to MLLMs is challenging due to sequence-conditioned objectives, limited feedback, and massive model scales. To address these issues, we propose Zer0-Jack, the first direct black-box jailbreak framework for MLLMs based on ZO optimization. Zer0-Jack focuses on generating malicious images and introduces a patch-wise block coordinate descent strategy that stabilizes gradient estimation and reduces query complexity, enabling efficient optimization on billion-scale models. Experiments show that Zer0-Jack achieves 98.2{\%} success on MiniGPT-4 and 95{\%} on the Harmful Behaviors Multi-modal dataset, while directly jailbreaking commercial models such as GPT-4o. These results demonstrate that ZO optimization can be effectively adapted to jailbreak large-scale multi-modal LLMs. Code is provided here."
}
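
The abstract above describes estimating gradients of a jailbreak objective purely from black-box queries and updating one image patch at a time (patch-wise block coordinate descent). Below is a minimal, self-contained sketch of that general idea, not the authors' implementation: the query_loss function is a hypothetical stand-in for scoring the black-box MLLM's response, and the patch size, smoothing parameter, sample count, and learning rate are illustrative assumptions.

# Sketch of zeroth-order, patch-wise block coordinate descent (illustrative only;
# not the Zer0-Jack code). A toy quadratic loss stands in for real MLLM queries.
import numpy as np

rng = np.random.default_rng(0)

def query_loss(image: np.ndarray) -> float:
    """Placeholder for the black-box objective: in a real attack this would query
    the MLLM and score how close its response is to the target behavior."""
    return float(np.sum((image - 0.5) ** 2))  # toy surrogate loss

def zo_patch_gradient(image, patch, mu=1e-2, n_samples=8):
    """Estimate the gradient w.r.t. a single patch with two-sided finite
    differences over random directions, so only loss queries are needed."""
    ys, ye, xs, xe = patch
    grad = np.zeros_like(image[ys:ye, xs:xe])
    for _ in range(n_samples):
        u = rng.standard_normal(grad.shape)
        img_plus, img_minus = image.copy(), image.copy()
        img_plus[ys:ye, xs:xe] += mu * u
        img_minus[ys:ye, xs:xe] -= mu * u
        grad += (query_loss(img_plus) - query_loss(img_minus)) / (2 * mu) * u
    return grad / n_samples

def patchwise_zo_descent(image, patch_size=8, lr=0.05, steps=20):
    """Block coordinate descent: update one patch per step, cycling over the
    image, which keeps each gradient estimate low-dimensional and query-cheap."""
    h, w = image.shape
    patches = [(y, min(y + patch_size, h), x, min(x + patch_size, w))
               for y in range(0, h, patch_size)
               for x in range(0, w, patch_size)]
    for t in range(steps):
        ys, ye, xs, xe = patches[t % len(patches)]
        g = zo_patch_gradient(image, (ys, ye, xs, xe))
        image[ys:ye, xs:xe] = np.clip(image[ys:ye, xs:xe] - lr * g, 0.0, 1.0)
    return image

adv = patchwise_zo_descent(np.full((16, 16), 0.9))
print("final surrogate loss:", query_loss(adv))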