@inproceedings{woo-etal-2025-think,
title = "Think, Verbalize, then Speak: Bridging Complex Thoughts and Comprehensible Speech",
author = "Woo, Tony and
Lee, Sehun and
Kim, Kang-wook and
Kim, Gunhee",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rosé, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.726/",
pages = "14373--14390",
ISBN = "979-8-89176-332-6",
abstract = "Spoken dialogue systems increasingly employ large language models (LLMs) to leverage their advanced reasoning capabilities. However, direct application of LLMs in spoken communication often yields suboptimal results due to mismatches between optimal textual and verbal delivery. While existing approaches adapt LLMs to produce speech-friendly outputs, their impact on reasoning performance remains underexplored. In this work, we propose **Think-Verbalize-Speak**, a framework that decouples reasoning from spoken delivery to preserve the full reasoning capacity of LLMs. Central to our method is *verbalizing*, an intermediate step that translates thoughts into natural, speech-ready text. We also introduce **ReVerT**, a latency-efficient verbalizer based on incremental and asynchronous summarization. Experiments across multiple benchmarks show that our method enhances speech naturalness and conciseness with minimal impact on reasoning. The project page with the dataset and the source code is available at https://yhytoto12.github.io/TVS-ReVerT."
}