@inproceedings{zhao-etal-2026-unraveling,
title = "Unraveling {LLM} Jailbreaks Through Safety Knowledge Neurons",
author = "Zhao, Chongwen and
Ke, Yutong and
Huang, Kaizhu",
editor = "Demberg, Vera and
Inui, Kentaro and
Marquez, Llu{\'i}s",
booktitle = "Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 1: Long Papers)",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.83/",
pages = "1889--1906",
ISBN = "979-8-89176-380-7",
abstract = "Large Language Models (LLMs) have achieved substantial progress in alignment, ensuring safer and more reliable outputs. However, jailbreak attacks can still bypass these safeguards and provoke harmful responses from well-aligned models. While some studies have achieved defenses against jailbreak attacks by modifying output distributions or detecting harmful content, the exact rationale still remains elusive. In this work, we present a novel neuron-level interpretability method that focuses on the role of safety-related knowledge neurons. Unlike existing approaches, our method projects the model{'}s internal representation into a more consistent and interpretable vocabulary space. We then show that adjusting the activation of safety-related neurons can effectively control the model{'}s behavior with a mean ASR higher than 97{\%}. Building on this insight, we propose SafeTuning, a fine-tuning strategy that reinforces safety-critical neurons to improve model robustness against jailbreaks. SafeTuning consistently reduces attack success rates across multiple LLMs and outperforms all four baseline defenses. These findings offer a new perspective on understanding and defending against jailbreak attacks."
}

Markdown (Informal)
[Unraveling LLM Jailbreaks Through Safety Knowledge Neurons](https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.83/) (Zhao et al., EACL 2026)
ACL
Chongwen Zhao, Yutong Ke, and Kaizhu Huang. 2026. Unraveling LLM Jailbreaks Through Safety Knowledge Neurons. In Proceedings of the 19th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1889–1906, Rabat, Morocco. Association for Computational Linguistics.