@inproceedings{sritharan-thayasivam-2025-wavcse,
title = "wav{CSE}: Learning Fixed-size Unified Speech Embeddings via Feature-based Multi-Task Learning",
author = "Sritharan, Braveenan and
Thayasivam, Uthayasanker",
editor = "Inui, Kentaro and
Sakti, Sakriani and
Wang, Haofen and
Wong, Derek F. and
Bhattacharyya, Pushpak and
Banerjee, Biplab and
Ekbal, Asif and
Chakraborty, Tanmoy and
Singh, Dhirendra Pratap",
booktitle = "Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "The Asian Federation of Natural Language Processing and The Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-long.48/",
pages = "879--887",
ISBN = "979-8-89176-298-5",
abstract = "Modern speech applications require compact embeddings that generalize across both linguistic and paralinguistic tasks. However, most existing embeddings are task-specific and fail to transfer effectively across domains. We propose wavCSE, a feature-based multi-task learning model that produces a fixed-size unified speech embedding suitable for both linguistic and paralinguistic tasks. wavCSE is jointly trained on keyword spotting, speaker identification, and emotion recognition, achieving state-of-the-art performance on all three tasks. The resulting unified embedding is then evaluated on twelve downstream tasks spanning both linguistic and paralinguistic domains. Experimental results show that it outperforms strong baselines on nine of the twelve tasks, indicating effective generalization across domains. To streamline embedding generation, we introduce a recursive layer selection strategy that identifies the most informative hidden layer outputs from the upstream model and refine how these selected outputs are aggregated in the downstream model. These enhancements reduce memory usage and computational cost while improving task performance, making them broadly applicable to self-supervised learning-based speech processing models."
}

Markdown (Informal)
[wavCSE: Learning Fixed-size Unified Speech Embeddings via Feature-based Multi-Task Learning](https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-long.48/) (Sritharan & Thayasivam, IJCNLP-AACL 2025)
ACL
Braveenan Sritharan and Uthayasanker Thayasivam. 2025. wavCSE: Learning Fixed-size Unified Speech Embeddings via Feature-based Multi-Task Learning. In Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics, pages 879–887, Mumbai, India. The Asian Federation of Natural Language Processing and The Association for Computational Linguistics.
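
The abstract describes feature-based multi-task learning: a frozen upstream model's hidden layer outputs are selected, aggregated, and shared across task heads for keyword spotting, speaker identification, and emotion recognition. The paper's exact recursive selection and aggregation procedures are not given in the abstract, so the following is only a minimal illustrative sketch of the general setup, assuming a SUPERB-style learnable weighted sum over a pre-selected layer subset; the layer indices, dimensions, and head names below are placeholders, not the authors' configuration.

```python
import torch
import torch.nn as nn

class UnifiedSpeechEmbedder(nn.Module):
    """Sketch: fuse a selected subset of frozen upstream hidden layers into
    one fixed-size embedding shared by several task heads. Hyperparameters
    here are illustrative assumptions, not the paper's settings."""

    def __init__(self, selected_layers=(4, 8, 12), hidden_dim=768,
                 embed_dim=256, num_keywords=35, num_speakers=1251,
                 num_emotions=4):
        super().__init__()
        # In the paper these indices would come from a recursive layer
        # selection procedure; here they are simply hard-coded.
        self.selected_layers = selected_layers
        # One learnable scalar weight per selected layer (softmax-normalized),
        # in the spirit of SUPERB-style feature-based probing.
        self.layer_weights = nn.Parameter(torch.zeros(len(selected_layers)))
        self.projection = nn.Linear(hidden_dim, embed_dim)
        # Separate linear heads for the three jointly trained tasks.
        self.heads = nn.ModuleDict({
            "keyword_spotting": nn.Linear(embed_dim, num_keywords),
            "speaker_id": nn.Linear(embed_dim, num_speakers),
            "emotion": nn.Linear(embed_dim, num_emotions),
        })

    def forward(self, hidden_states):
        # hidden_states: list of (batch, time, hidden_dim) tensors, one per
        # upstream layer, e.g. from a frozen wav2vec 2.0 / HuBERT encoder.
        stacked = torch.stack([hidden_states[i] for i in self.selected_layers])
        weights = torch.softmax(self.layer_weights, dim=0)
        fused = (weights[:, None, None, None] * stacked).sum(dim=0)
        # Mean-pool over time to obtain a fixed-size utterance embedding.
        embedding = self.projection(fused.mean(dim=1))
        logits = {name: head(embedding) for name, head in self.heads.items()}
        return embedding, logits
```

In joint training one would sum the per-task losses computed from `logits`, while downstream evaluation would reuse `embedding` directly as the unified speech representation.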