@inproceedings{wang-etal-2020-tencent-ai,
  title     = {Tencent {AI} Lab Machine Translation Systems for the {WMT}20 Biomedical Translation Task},
  author    = {Wang, Xing and
               Tu, Zhaopeng and
               Wang, Longyue and
               Shi, Shuming},
  editor    = {Barrault, Lo{\"i}c and
               Bojar, Ond{\v{r}}ej and
               Bougares, Fethi and
               Chatterjee, Rajen and
               Costa-juss{\`a}, Marta R. and
               Federmann, Christian and
               Fishel, Mark and
               Fraser, Alexander and
               Graham, Yvette and
               Guzman, Paco and
               Haddow, Barry and
               Huck, Matthias and
               Yepes, Antonio Jimeno and
               Koehn, Philipp and
               Martins, Andr{\'e} and
               Morishita, Makoto and
               Monz, Christof and
               Nagata, Masaaki and
               Nakazawa, Toshiaki and
               Negri, Matteo},
  booktitle = {Proceedings of the Fifth Conference on Machine Translation},
  month     = nov,
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2020.wmt-1.97/},
  pages     = {881--886},
  abstract  = {This paper describes the Tencent AI Lab submission of the WMT2020 shared task on biomedical translation in four language directions: German{\ensuremath{<}}-{\ensuremath{>}}English, English{\ensuremath{<}}-{\ensuremath{>}}German, Chinese{\ensuremath{<}}-{\ensuremath{>}}English and English{\ensuremath{<}}-{\ensuremath{>}}Chinese. We implement our system with model ensemble technique on different transformer architectures (Deep, Hybrid, Big, Large Transformers). To enlarge the in-domain bilingual corpus, we use back-translation of monolingual in-domain data in the target language as additional in-domain training data. Our systems in German-{\ensuremath{>}}English and English-{\ensuremath{>}}German are ranked 1st and 3rd respectively according to the official evaluation results in terms of BLEU scores.},
  internal-note = {NOTE(review): editor list looks truncated by extraction (stops at "Negri, Matteo"); verify against the WMT 2020 front matter},
}
Markdown (Informal)
[Tencent AI Lab Machine Translation Systems for the WMT20 Biomedical Translation Task](https://aclanthology.org/2020.wmt-1.97/) (Wang et al., WMT 2020)
ACL