@inproceedings{wang-etal-2021-ji-yu-duo,
    title = "基于多层次预训练策略和多任务学习的端到端蒙汉语音翻译(End-to-end {Mongolian}-{Chinese} Speech Translation Based on Multi-level Pre-training Strategies and Multi-task Learning)",
    author = "Wang, Ningning and
      Fei, Long and
      Zhang, Hui",
    editor = "Li, Sheng and
      Sun, Maosong and
      Liu, Yang and
      Wu, Hua and
      Liu, Kang and
      Che, Wanxiang and
      He, Shizhu and
      Rao, Gaoqi",
    booktitle = "Proceedings of the 20th Chinese National Conference on Computational Linguistics",
    month = aug,
    year = "2021",
    address = "Huhhot, China",
    publisher = "Chinese Information Processing Society of China",
    url = "https://aclanthology.org/2021.ccl-1.15/",
    pages = "156--165",
    language = "zho",
    abstract = "端到端语音翻译将源语言语音直接翻译为目标语言文本,它需要{\textquotedblleft}源语言语音-目标语言文本{\textquotedblright}作为训练数据,然而这类数据极其稀缺,本文提出了一种多层次预训练策略和多任务学习相结合的训练方法,首先分别对语音识别和机器翻译模型的各个模块进行多层次预训练,接着将语音识别和机器翻译模型连接起来构成语音翻译模型,然后使用迁移学习对预训练好的模型进行多步骤微调,在此过程中又运用多任务学习的方法,将语音识别作为语音翻译的一个辅助任务来组织训练,充分利用了已经存在的各种不同形式的数据来训练端到端模型,首次将端到端技术应用于资源受限条件下的蒙汉语音翻译,构建了首个翻译质量较高、实际可用的端到端蒙汉语音翻译系统。"
}