@inproceedings{lugo-vielzeuf-2024-towards,
title = "Towards efficient self-supervised representation learning in speech processing",
author = "Lugo, Luis and
Vielzeuf, Valentin",
editor = "Graham, Yvette and
Purver, Matthew",
booktitle = "Findings of the Association for Computational Linguistics: EACL 2024",
month = mar,
year = "2024",
address = "St. Julian{'}s, Malta",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.findings-eacl.23/",
pages = "340--346",
    abstract = "Self-supervised learning has achieved impressive results in speech processing, but current models are computationally expensive, generating environmental concerns because of their high energy consumption. Therefore, we propose an efficient self-supervised approach to address high computational costs, using a single GPU for 24 to 48 hours of pretraining. The proposed approach combines linear, convolutional, and self-attention layers with several optimizations, including dynamic batching, flash attention, mixed-precision training, gradient accumulation, and acoustic feature extraction with input preprocessing. Computational cost estimates for our proposed model represent improvements of up to two orders of magnitude in computational efficiency over existing speech models."
}
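The abstract names several standard training-efficiency techniques. Below is a minimal, hypothetical sketch of how two of them, mixed-precision training and gradient accumulation, are commonly combined in PyTorch; it is not the authors' implementation, and the model, data, and hyperparameters are placeholders chosen only for illustration.

```python
# Minimal sketch: mixed-precision training + gradient accumulation in PyTorch.
# All shapes, the toy model, and the dummy data are illustrative placeholders.
import torch
from torch import nn
from torch.cuda.amp import autocast, GradScaler

# Toy stand-in for a speech encoder; the paper's model is not reproduced here.
model = nn.Sequential(nn.Linear(80, 256), nn.ReLU(), nn.Linear(256, 256)).cuda()
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
scaler = GradScaler()      # scales the loss so fp16 gradients do not underflow
accum_steps = 4            # effective batch size = accum_steps * per-step batch

# Dummy batches standing in for acoustic features (e.g., 80-dim log-mel frames).
loader = [(torch.randn(8, 80), torch.randn(8, 256)) for _ in range(16)]

optimizer.zero_grad()
for step, (features, targets) in enumerate(loader):
    features, targets = features.cuda(), targets.cuda()
    with autocast():                       # forward pass in mixed precision
        loss = nn.functional.mse_loss(model(features), targets) / accum_steps
    scaler.scale(loss).backward()          # accumulate scaled gradients
    if (step + 1) % accum_steps == 0:
        scaler.step(optimizer)             # unscale gradients and apply the update
        scaler.update()
        optimizer.zero_grad()
```

Gradient accumulation trades update frequency for memory, which is one common way to fit large effective batches on a single GPU; mixed precision further reduces memory and speeds up matrix multiplies on modern hardware.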