@inproceedings{cui-etal-2025-voxeval,
title = "{V}ox{E}val: Benchmarking the Knowledge Understanding Capabilities of End-to-End Spoken Language Models",
author = "Cui, Wenqian and
Jiao, Xiaoqi and
Meng, Ziqiao and
King, Irwin",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.818/",
pages = "16735--16753",
ISBN = "979-8-89176-251-0",
abstract = "With the rising need for speech-based interaction models, end-to-end Spoken Language Models (SLMs) have emerged as a promising solution. While these models require comprehensive world knowledge for meaningful and reliable human interactions, existing question-answering (QA) benchmarks fall short in evaluating SLMs' knowledge understanding due to their inability to support end-to-end speech evaluation and account for varied input audio conditions. To address these limitations, we present VoxEval, a novel SpeechQA benchmark that assesses SLMs' knowledge understanding through pure speech interactions. Our benchmark uniquely maintains speech format for both inputs and outputs, evaluates model robustness across diverse input audio conditions, and pioneers the assessment of complex tasks like mathematical reasoning in spoken format. Through systematic evaluation, we demonstrate that current SLMs exhibit poor performance on VoxEval, show sensitivity to varying audio conditions, and possess limited reasoning capabilities, highlighting critical areas for future development. VoxEval dataset is available at: https://github.com/dreamtheater123/VoxEval"
}
Markdown (Informal)
[VoxEval: Benchmarking the Knowledge Understanding Capabilities of End-to-End Spoken Language Models](https://aclanthology.org/2025.acl-long.818/) (Cui et al., ACL 2025)