@inproceedings{li-etal-2020-end,
title = "End-to-End Speech Translation with Adversarial Training",
author = "Li, Xuancai and
Chen, Kehai and
Zhao, Tiejun and
Yang, Muyun",
editor = "Wu, Hua and
Cherry, Colin and
Huang, Liang and
He, Zhongjun and
Liberman, Mark and
Cross, James and
Liu, Yang",
booktitle = "Proceedings of the First Workshop on Automatic Simultaneous Translation",
month = jul,
year = "2020",
address = "Seattle, Washington",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Author-page-Marten-During-lu/2020.autosimtrans-1.2/",
doi = "10.18653/v1/2020.autosimtrans-1.2",
pages = "10--14",
abstract = "End-to-End speech translation usually leverages audio-to-text parallel data to train an available speech translation model which has shown impressive results on various speech translation tasks. Due to the artificial cost of collecting audio-to-text parallel data, the speech translation is a natural low-resource translation scenario, which greatly hinders its improvement. In this paper, we proposed a new adversarial training method to leverage target monolingual data to relieve the low-resource shortcoming of speech translation. In our method, the existing speech translation model is considered as a Generator to gain a target language output, and another neural Discriminator is used to guide the distinction between outputs of speech translation model and true target monolingual sentences. Experimental results on the CCMT 2019-BSTC dataset speech translation task demonstrate that the proposed methods can significantly improve the performance of the End-to-End speech translation system."
}