@inproceedings{xu-he-2023-security,
    title     = {Security Challenges in {Natural Language Processing} Models},
    author    = {Xu, Qiongkai and
                 He, Xuanli},
    editor    = {Zhang, Qi and
                 Sajjad, Hassan},
    booktitle = {Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts},
    month     = dec,
    year      = {2023},
    address   = {Singapore},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2023.emnlp-tutorial.2},
    doi       = {10.18653/v1/2023.emnlp-tutorial.2},
    pages     = {7--12},
    abstract  = {Large-scale natural language processing models have been developed and integrated into numerous applications, given the advantage of their remarkable performance. Nonetheless, the security concerns associated with these models prevent the widespread adoption of these black-box machine learning models. In this tutorial, we will dive into three emerging security issues in NLP research, i.e., backdoor attacks, private data leakage, and imitation attacks. These threats will be introduced in accordance with their threatening usage scenarios, attack methodologies, and defense technologies.},
}
Markdown (Informal)
[Security Challenges in Natural Language Processing Models](https://aclanthology.org/2023.emnlp-tutorial.2) (Xu & He, EMNLP 2023)
ACL