@inproceedings{yifeng-etal-2025-swallowing,
title = "Swallowing the Poison Pills: Insights from Vulnerability Disparity Among {LLM}s",
author = "Yifeng, Peng and
Wu, Zhizheng and
Chen, Chen",
editor = "Inui, Kentaro and
Sakti, Sakriani and
Wang, Haofen and
Wong, Derek F. and
Bhattacharyya, Pushpak and
Banerjee, Biplab and
Ekbal, Asif and
Chakraborty, Tanmoy and
Singh, Dhirendra Pratap",
booktitle = "Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "The Asian Federation of Natural Language Processing and The Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.findings-ijcnlp.3/",
pages = "38--55",
ISBN = "979-8-89176-303-6",
abstract = "Modern large language models (LLMs) exhibit critical vulnerabilities to poison pill attacks{---}localized data poisoning that alters specific factual knowledge while preserving overall model utility. We systematically demonstrate that these attacks exploit inherent architectural properties of LLMs, achieving 54.6{\%} higher retrieval inaccuracy on long-tail knowledge versus dominant topics and up to 25.5{\%} higher retrieval inaccuracy on compressed models versus original architectures. Through controlled mutations (e.g., temporal/spatial/entity alterations), our method induces localized memorization deterioration with negligible impact on models' performance on standard benchmarks (e.g., {\ensuremath{<}}2{\%} performance drop on MMLU/GPQA), leading to potential detection evasion. Our findings suggest: (1) Disproportionate vulnerability in long-tail knowledge may result from reduced parameter redundancy; (2) Model compression may increase attack surfaces, with pruned/distilled models requiring 30{\%} fewer poison samples for equivalent damage; (3) Associative memory enables both the spread of collateral damage to related concepts and the amplification of damage from simultaneous attacks, particularly for dominant topics. These findings raise concerns about current scaling paradigms, since attack costs are falling while defense complexity is rising. Our work establishes poison pills as both a security threat and a diagnostic tool, revealing critical security-efficiency trade-offs in language model compression that challenge prevailing safety assumptions."
}