@inproceedings{fang-etal-2024-ctc,
title = "{CTC}-based Non-autoregressive Textless Speech-to-Speech Translation",
author = "Fang, Qingkai and
Ma, Zhengrui and
Zhou, Yan and
Zhang, Min and
Feng, Yang",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2024",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.findings-acl.543/",
doi = "10.18653/v1/2024.findings-acl.543",
pages = "9155--9161",
abstract = "Direct speech-to-speech translation (S2ST) has achieved impressive translation quality, but it often faces the challenge of slow decoding due to the considerable length of speech sequences. Recently, some research has turned to non-autoregressive (NAR) models to expedite decoding, yet the translation quality typically lags behind autoregressive (AR) models significantly. In this paper, we investigate the performance of CTC-based NAR models in S2ST, as these models have shown impressive results in machine translation. Experimental results demonstrate that by combining pretraining, knowledge distillation, and advanced NAR training techniques such as glancing training and non-monotonic latent alignments, CTC-based NAR models achieve translation quality comparable to the AR model, while preserving up to 26.81$\times$ decoding speedup."
}
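The paper trains its non-autoregressive decoder with the CTC objective, which marginalizes over all monotonic alignments between decoder output frames and target discrete units. Below is a minimal, hypothetical sketch (not the authors' code) of that objective using PyTorch's torch.nn.CTCLoss; the shapes, unit-vocabulary size, and random inputs are illustrative assumptions only.

```python
import torch
import torch.nn.functional as F

# Hypothetical sizes: decoder frames, batch, unit vocabulary (incl. blank), target length.
T, N, C, S = 50, 4, 1000, 20

logits = torch.randn(T, N, C)              # stand-in for NAR decoder outputs, one per frame
log_probs = F.log_softmax(logits, dim=-1)  # CTCLoss expects log-probabilities

targets = torch.randint(1, C, (N, S))      # stand-in discrete speech units (index 0 = blank)
input_lengths = torch.full((N,), T, dtype=torch.long)
target_lengths = torch.full((N,), S, dtype=torch.long)

# CTC sums over all monotonic alignments of the S target units to the T decoder frames.
ctc = torch.nn.CTCLoss(blank=0, zero_infinity=True)
loss = ctc(log_probs, targets, input_lengths, target_lengths)
```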