@inproceedings{yan-etal-2025-rethinking,
title = "Rethinking Backdoor Detection Evaluation for Language Models",
author = "Yan, Jun and
Mo, Wenjie Jacky and
Ren, Xiang and
Jia, Robin",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.emnlp-main.318/",
doi = "10.18653/v1/2025.emnlp-main.318",
pages = "6239--6250",
ISBN = "979-8-89176-332-6",
abstract = "Backdoor attacks, in which a model behaves maliciously when given an attacker-specified trigger, pose a major security risk for practitioners who depend on publicly released language models. As a countermeasure, backdoor detection methods aim to detect whether a released model contains a backdoor. While existing backdoor detection methods have high accuracy in detecting backdoored models on standard benchmarks, it is unclear whether they can robustly identify backdoors in the wild. In this paper, we examine the robustness of backdoor detectors by manipulating different factors during backdoor planting. We find that the success of existing methods based on trigger inversion or meta classifiers highly depends on how intensely the model is trained on poisoned data. Specifically, backdoors planted with more aggressive or more conservative training are significantly more difficult to detect than the default ones. Our results highlight a lack of robustness of existing backdoor detectors and the limitations in current benchmark construction."
}