@inproceedings{fukuda-etal-2021-knowledge,
title = "On Knowledge Distillation for Translating Erroneous Speech Transcriptions",
author = "Fukuda, Ryo and
Sudoh, Katsuhito and
Nakamura, Satoshi",
editor = "Federico, Marcello and
Waibel, Alex and
Costa-juss{\`a}, Marta R. and
Niehues, Jan and
St{\"u}ker, Sebastian and
Salesky, Elizabeth",
booktitle = "Proceedings of the 18th International Conference on Spoken Language Translation (IWSLT 2021)",
month = aug,
year = "2021",
address = "Bangkok, Thailand (online)",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2021.iwslt-1.24/",
doi = "10.18653/v1/2021.iwslt-1.24",
pages = "198--205",
abstract = "Recent studies argue that knowledge distillation is promising for speech translation (ST) using end-to-end models. In this work, we investigate the effect of knowledge distillation with a cascade ST using automatic speech recognition (ASR) and machine translation (MT) models. We distill knowledge from a teacher model based on human transcripts to a student model based on erroneous transcriptions. Our experimental results demonstrated that knowledge distillation is beneficial for a cascade ST. Further investigation that combined knowledge distillation and fine-tuning revealed that the combination consistently improved two language pairs: English-Italian and Spanish-English."
}