@inproceedings{yoo-etal-2025-code,
title = "Code-Switching Red-Teaming: {LLM} Evaluation for Safety and Multilingual Understanding",
author = "Yoo, Haneul and
Yang, Yongjin and
Lee, Hwaran",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.acl-long.657/",
pages = "13392--13413",
ISBN = "979-8-89176-251-0",
    abstract = "As large language models (LLMs) have advanced rapidly, concerns regarding their safety have become prominent. In this paper, we discover that code-switching, a common practice in natural language, can effectively elicit undesirable behaviors of LLMs when applied to red-teaming queries. We introduce a simple yet effective framework, CSRT, to synthesize code-switching red-teaming queries and comprehensively investigate the safety and multilingual understanding of LLMs. Through extensive experiments with ten state-of-the-art LLMs and code-switching queries combining up to 10 languages, we demonstrate that the CSRT significantly outperforms existing multilingual red-teaming techniques, achieving 46.7{\%} more attacks than standard attacks in English and remaining effective in conventional safety domains. We also examine the multilingual ability of these LLMs to generate and understand code-switching texts. Additionally, we validate the extensibility of the CSRT by generating code-switching attack prompts with monolingual data. Finally, we conduct detailed ablation studies exploring code-switching and reveal an unintended correlation between the resource availability of languages and safety alignment in existing multilingual LLMs."
}