@inproceedings{mohapatra-etal-2025-llms,
title = "Can {LLM}s Understand Unvoiced Speech? Exploring {EMG}-to-Text Conversion with {LLM}s",
author = "Mohapatra, Payal and
Pandey, Akash and
Zhang, Xiaoyuan and
Zhu, Qi",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.acl-short.56/",
pages = "703--712",
ISBN = "979-8-89176-252-7",
abstract = "Unvoiced electromyography (EMG) is an effective communication tool for individuals unable to produce vocal speech. However, most prior methods rely on paired voiced and unvoiced EMG signals, along with speech data, for unvoiced EMG-to-text conversion, which is not practical for these individuals. Given the rise of large language models (LLMs) in speech recognition, we explore their potential to understand unvoiced speech. To this end, we address the challenge of \textit{learning from unvoiced EMG alone} and propose a novel EMG adaptor module that maps EMG features to an LLM{'}s input space, achieving an average word error rate of 0.49 on a closed-vocabulary unvoiced EMG-to-text task. Even with a conservative data availability of just six minutes, our approach improves performance over specialized models by nearly 20{\%}. While LLMs have been shown to be extendable to new language modalities{---}such as audio{---}understanding articulatory biosignals, like unvoiced EMG, is more challenging. This work takes a crucial first step toward enabling LLMs to comprehend unvoiced speech using surface EMG."
}
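The abstract describes an adaptor module that maps EMG features into the LLM's input embedding space so the frozen LLM can decode text from unvoiced EMG alone. The paper's actual architecture is not reproduced here; below is a minimal illustrative sketch of that general idea in PyTorch, where the module name `EMGAdaptor`, the layer layout, and all dimensions (`emg_dim`, `llm_dim`, `hidden`) are hypothetical placeholders, not the authors' design.

```python
import torch
import torch.nn as nn

class EMGAdaptor(nn.Module):
    """Illustrative adaptor: projects per-frame EMG features into an
    LLM's token-embedding space so they can be fed as soft-prompt
    embeddings. All sizes below are placeholder assumptions."""

    def __init__(self, emg_dim: int = 128, llm_dim: int = 4096, hidden: int = 512):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(emg_dim, hidden),
            nn.GELU(),
            nn.Linear(hidden, llm_dim),
        )

    def forward(self, emg_feats: torch.Tensor) -> torch.Tensor:
        # emg_feats: (batch, frames, emg_dim) -> (batch, frames, llm_dim)
        return self.net(emg_feats)

# Usage sketch: adapted EMG frames become embedding vectors that could be
# concatenated with text-prompt embeddings before the (frozen) LLM.
adaptor = EMGAdaptor()
emg = torch.randn(2, 100, 128)   # dummy batch of EMG feature frames
soft_prompt = adaptor(emg)       # shape: (2, 100, 4096)
```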