@inproceedings{luo-etal-2026-simple,
title = "A Simple and Efficient Learning-Style Prompting for {LLM} Jailbreaking",
author = "Luo, Xuan and
Wang, Yue and
He, Zefeng and
Tu, Geng and
Li, Jing and
Xu, Ruifeng",
editor = "Demberg, Vera and
Inui, Kentaro and
Marquez, Llu{\'i}s",
booktitle = "Findings of the {A}ssociation for {C}omputational {L}inguistics: {EACL} 2026",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-eacl/2026.findings-eacl.124/",
pages = "2389--2406",
ISBN = "979-8-89176-386-9",
abstract = "This study reveals a critical safety blind spot in modern LLMs: learning-style queries, which closely resemble ordinary educational questions, can reliably elicit harmful responses.The learning-style queries are constructed by a novel reframing paradigm: HILL (Hiding Intention by Learning from LLMs). The deterministic, model-agnostic reframing framework is composed of 4 conceptual components: 1) key concept, 2) exploratory transformation, 3) detail-oriented inquiry, and optionally 4) hypotheticality.Further, new metrics are introduced to thoroughly evaluate the efficiency and harmfulness of jailbreak methods.Experiments on the AdvBench dataset across a wide range of models demonstrate HILL{'}s strong generalizability. It achieves top attack success rates on the majority of models and across malicious categories while maintaining high efficiency with concise prompts. On the other hand, results of various defense methods show the robustness of HILL, with most defenses having mediocre effects or even increasing the attack success rates. In addition, the assessment of defenses on the constructed safe prompts reveals inherent limitations of LLMs' safety mechanisms and flaws in the defense methods. This work exposes significant vulnerabilities of safety measures against learning-style elicitation, highlighting a critical challenge of fulfilling both helpfulness and safety alignments."
}