@inproceedings{chen-etal-2025-data,
title = "Data-Centric Improvements for Enhancing Multi-Modal Understanding in Spoken Conversation Modeling",
author = "Chen, Maximillian and
Sun, Ruoxi and
Arik, Sercan O",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/display_plenaries/2025.findings-acl.71/",
pages = "1366--1387",
ISBN = "979-8-89176-256-5",
abstract = "Conversational assistants are increasingly popular across diverse real-world applications, highlighting the need for advanced multimodal speech modeling. Speech, as a natural mode of communication, encodes rich user-specific characteristics such as speaking rate and pitch, making it critical for effective interaction. Our work introduces a data-centric customization approach for efficiently enhancing multimodal understanding in conversational speech modeling. Central to our contributions is a novel multi-task learning paradigm that involves designing auxiliary tasks to utilize a small amount of speech data. Our approach achieves state-of-the-art performance on the Spoken-SQuAD benchmark, using only 10{\%} of the training data with open-weight models, establishing a robust and efficient framework for audio-centric conversational modeling. We also introduce ASK-QA, the first dataset for multi-turn spoken dialogue with ambiguous user requests and dynamic evaluation inputs."
}
Markdown (Informal)
[Data-Centric Improvements for Enhancing Multi-Modal Understanding in Spoken Conversation Modeling](https://preview.aclanthology.org/display_plenaries/2025.findings-acl.71/) (Chen et al., Findings 2025)
ACL
Maximillian Chen, Ruoxi Sun, and Sercan O Arik. 2025. Data-Centric Improvements for Enhancing Multi-Modal Understanding in Spoken Conversation Modeling. In Findings of the Association for Computational Linguistics: ACL 2025, pages 1366–1387, Vienna, Austria. Association for Computational Linguistics.