@inproceedings{ren-etal-2025-shield,
    title = "{SHIELD}: Classifier-Guided Prompting for Robust and Safer {LVLM}s",
    author = "Ren, Juan and
      Dras, Mark and
      Naseem, Usman",
    editor = "Kummerfeld, Jonathan K. and
      Joshi, Aditya and
      Dras, Mark",
    booktitle = "Proceedings of The 23rd Annual Workshop of the Australasian Language Technology Association",
    month = nov,
    year = "2025",
    address = "Sydney, Australia",
    publisher = "Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/ingest-alta/2025.alta-main.6/",
    pages = "76--89",
    issn = "1834-7037",
    abstract = "Large Vision-Language Models (LVLMs) unlock powerful multimodal reasoning but also expand the attack surface, particularly through adversarial inputs that conceal harmful goals in benign prompts. We propose SHIELD, a lightweight, model-agnostic preprocessing framework that couples fine-grained safety classification with category-specific guidance and explicit actions (Block, Reframe, and Forward). Unlike binary moderators, SHIELD composes tailored safety prompts that enforce nuanced refusals or safe redirections without retraining. Across five benchmarks and five representative LVLMs, SHIELD consistently lowers jailbreak and non-following rates while preserving utility. Our method is plug-and-play, incurs negligible overhead, and is easily extendable to new attack types{---}serving as a practical safety patch for both weakly and strongly aligned LVLMs."
}
Markdown (Informal)
[SHIELD: Classifier-Guided Prompting for Robust and Safer LVLMs](https://preview.aclanthology.org/ingest-alta/2025.alta-main.6/) (Ren et al., ALTA 2025)
ACL