@inproceedings{yang-etal-2025-withstand,
  title     = {Who Can Withstand {Chat-Audio Attacks}? An Evaluation Benchmark for Large {Audio-Language} Models},
  author    = {Yang, Wanqi and
               Li, Yanda and
               Fang, Meng and
               Wei, Yunchao and
               Chen, Ling},
  editor    = {Che, Wanxiang and
               Nabende, Joyce and
               Shutova, Ekaterina and
               Pilehvar, Mohammad Taher},
  booktitle = {Findings of the Association for Computational Linguistics: {ACL} 2025},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.findings-acl.884/},
  pages     = {17205--17220},
  isbn      = {979-8-89176-256-5},
  abstract  = {Adversarial audio attacks pose a significant threat to the growing use of large audio-language models (LALMs) in voice-based human-machine interactions. While existing research focused on model-specific adversarial methods, real-world applications demand a more generalizable and universal approach to audio adversarial attacks. In this paper, we introduce the Chat-Audio Attacks (CAA) benchmark including four distinct types of audio attacks, which aims to explore the vulnerabilities of LALMs to these audio attacks in conversational scenarios. To evaluate the robustness of LALMs, we propose three evaluation strategies: Standard Evaluation, utilizing traditional metrics to quantify model performance under attacks; GPT-4o-Based Evaluation, which simulates real-world conversational complexities; and Human Evaluation, offering insights into user perception and trust. We evaluate six state-of-the-art LALMs with voice interaction capabilities, including Gemini-1.5-Pro, GPT-4o, and others, using three distinct evaluation methods on the CAA benchmark. Our comprehensive analysis reveals the impact of four types of audio attacks on the performance of these models, demonstrating that GPT-4o exhibits the highest level of resilience. Our data can be accessed via the following link: CAA.},
}
@comment{
Markdown (Informal):
[Who Can Withstand Chat-Audio Attacks? An Evaluation Benchmark for Large Audio-Language Models](https://aclanthology.org/2025.findings-acl.884/) (Yang et al., Findings of ACL 2025)
}