@inproceedings{zhao-etal-2024-parameter,
title = "Parameter-Efficient Transfer Learning for End-to-end Speech Translation",
author = "Zhao, Yunlong and
Wang, Kexin and
Dong, Qianqian and
Ko, Tom",
editor = "Calzolari, Nicoletta and
Kan, Min-Yen and
Hoste, Veronique and
Lenci, Alessandro and
Sakti, Sakriani and
Xue, Nianwen",
booktitle = "Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)",
month = may,
year = "2024",
address = "Torino, Italia",
publisher = "ELRA and ICCL",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.lrec-main.1102/",
pages = "12592--12598",
abstract = "Recently, end-to-end speech translation (ST) has gained significant attention in research, but its progress is hindered by the limited availability of labeled data. To overcome this challenge, leveraging pre-trained models for knowledge transfer in ST has emerged as a promising direction. In this paper, we propose PETL-ST, which investigates parameter-efficient transfer learning for end-to-end speech translation. Our method utilizes two lightweight adaptation techniques, namely prefix and adapter, to modulate Attention and the Feed-Forward Network, respectively, while preserving the capabilities of pre-trained models. We conduct experiments on MuST-C En-De, Es, Fr, Ru datasets to evaluate the performance of our approach. The results demonstrate that PETL-ST outperforms strong baselines, achieving superior translation quality with high parameter efficiency. Moreover, our method exhibits remarkable data efficiency and significantly improves performance in low-resource settings."
}
Markdown (Informal)
[Parameter-Efficient Transfer Learning for End-to-end Speech Translation](https://aclanthology.org/2024.lrec-main.1102/) (Zhao et al., LREC-COLING 2024)
ACL
Yunlong Zhao, Kexin Wang, Qianqian Dong, and Tom Ko. 2024. Parameter-Efficient Transfer Learning for End-to-end Speech Translation. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 12592–12598, Torino, Italia. ELRA and ICCL.
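
The abstract describes PETL-ST's two lightweight modules: trainable prefixes that modulate attention and bottleneck adapters that modulate the feed-forward network, while the pre-trained weights stay frozen. Below is a minimal PyTorch sketch of these two standard techniques in isolation; the dimensions, prefix length, bottleneck size, and wiring are illustrative assumptions, not the paper's exact PETL-ST configuration.

```python
# Illustrative sketch of prefix tuning on attention and a bottleneck adapter
# on the FFN path. All hyperparameters here are assumed for the example.
import torch
import torch.nn as nn


class PrefixAttention(nn.Module):
    """Frozen multi-head attention with trainable prefix key/value vectors."""

    def __init__(self, d_model=512, n_heads=8, prefix_len=16):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        for p in self.attn.parameters():  # pre-trained weights stay frozen
            p.requires_grad = False
        # Only these prefix parameters are updated during transfer.
        self.prefix_k = nn.Parameter(torch.randn(prefix_len, d_model) * 0.02)
        self.prefix_v = nn.Parameter(torch.randn(prefix_len, d_model) * 0.02)

    def forward(self, x):  # x: (batch, seq, d_model)
        b = x.size(0)
        # Prepend the learned prefixes to the keys and values.
        k = torch.cat([self.prefix_k.expand(b, -1, -1), x], dim=1)
        v = torch.cat([self.prefix_v.expand(b, -1, -1), x], dim=1)
        out, _ = self.attn(x, k, v)
        return out


class Adapter(nn.Module):
    """Residual bottleneck adapter applied after the feed-forward network."""

    def __init__(self, d_model=512, bottleneck=64):
        super().__init__()
        self.down = nn.Linear(d_model, bottleneck)
        self.up = nn.Linear(bottleneck, d_model)

    def forward(self, x):
        # Down-project, nonlinearity, up-project, residual connection.
        return x + self.up(torch.relu(self.down(x)))


x = torch.randn(2, 50, 512)          # (batch, frames, d_model)
y = Adapter()(PrefixAttention()(x))  # shape preserved: (2, 50, 512)
```

Because only the prefix and adapter parameters require gradients, the trainable footprint stays small relative to the frozen backbone, which is the parameter-efficiency property the abstract claims.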