@inproceedings{zeng-etal-2021-wechat,
title = "{W}e{C}hat Neural Machine Translation Systems for {WMT}21",
author = "Zeng, Xianfeng and
Liu, Yijin and
Li, Ernan and
Ran, Qiu and
Meng, Fandong and
Li, Peng and
Xu, Jinan and
Zhou, Jie",
booktitle = "Proceedings of the Sixth Conference on Machine Translation",
month = nov,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.wmt-1.23",
pages = "243--254",
abstract = "This paper introduces WeChat AI{'}s participation in WMT 2021 shared news translation task on English-{\textgreater}Chinese, English-{\textgreater}Japanese, Japanese-{\textgreater}English and English-{\textgreater}German. Our systems are based on the Transformer (Vaswani et al., 2017) with several novel and effective variants. In our experiments, we employ data filtering, large-scale synthetic data generation (i.e., back-translation, knowledge distillation, forward-translation, iterative in-domain knowledge transfer), advanced finetuning approaches, and boosted Self-BLEU based model ensemble. Our constrained systems achieve 36.9, 46.9, 27.8 and 31.3 case-sensitive BLEU scores on English-{\textgreater}Chinese, English-{\textgreater}Japanese, Japanese-{\textgreater}English and English-{\textgreater}German, respectively. The BLEU scores of English-{\textgreater}Chinese, English-{\textgreater}Japanese and Japanese-{\textgreater}English are the highest among all submissions, and that of English-{\textgreater}German is the highest among all constrained submissions.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zeng-etal-2021-wechat">
<titleInfo>
<title>WeChat Neural Machine Translation Systems for WMT21</title>
</titleInfo>
<name type="personal">
<namePart type="given">Xianfeng</namePart>
<namePart type="family">Zeng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yijin</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ernan</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qiu</namePart>
<namePart type="family">Ran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fandong</namePart>
<namePart type="family">Meng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Peng</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jinan</namePart>
<namePart type="family">Xu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jie</namePart>
<namePart type="family">Zhou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Sixth Conference on Machine Translation</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper introduces WeChat AI’s participation in WMT 2021 shared news translation task on English-&gt;Chinese, English-&gt;Japanese, Japanese-&gt;English and English-&gt;German. Our systems are based on the Transformer (Vaswani et al., 2017) with several novel and effective variants. In our experiments, we employ data filtering, large-scale synthetic data generation (i.e., back-translation, knowledge distillation, forward-translation, iterative in-domain knowledge transfer), advanced finetuning approaches, and boosted Self-BLEU based model ensemble. Our constrained systems achieve 36.9, 46.9, 27.8 and 31.3 case-sensitive BLEU scores on English-&gt;Chinese, English-&gt;Japanese, Japanese-&gt;English and English-&gt;German, respectively. The BLEU scores of English-&gt;Chinese, English-&gt;Japanese and Japanese-&gt;English are the highest among all submissions, and that of English-&gt;German is the highest among all constrained submissions.</abstract>
<identifier type="citekey">zeng-etal-2021-wechat</identifier>
<location>
<url>https://aclanthology.org/2021.wmt-1.23</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>243</start>
<end>254</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T WeChat Neural Machine Translation Systems for WMT21
%A Zeng, Xianfeng
%A Liu, Yijin
%A Li, Ernan
%A Ran, Qiu
%A Meng, Fandong
%A Li, Peng
%A Xu, Jinan
%A Zhou, Jie
%S Proceedings of the Sixth Conference on Machine Translation
%D 2021
%8 nov
%I Association for Computational Linguistics
%C Online
%F zeng-etal-2021-wechat
%X This paper introduces WeChat AI’s participation in WMT 2021 shared news translation task on English->Chinese, English->Japanese, Japanese->English and English->German. Our systems are based on the Transformer (Vaswani et al., 2017) with several novel and effective variants. In our experiments, we employ data filtering, large-scale synthetic data generation (i.e., back-translation, knowledge distillation, forward-translation, iterative in-domain knowledge transfer), advanced finetuning approaches, and boosted Self-BLEU based model ensemble. Our constrained systems achieve 36.9, 46.9, 27.8 and 31.3 case-sensitive BLEU scores on English->Chinese, English->Japanese, Japanese->English and English->German, respectively. The BLEU scores of English->Chinese, English->Japanese and Japanese->English are the highest among all submissions, and that of English->German is the highest among all constrained submissions.
%U https://aclanthology.org/2021.wmt-1.23
%P 243-254
Markdown (Informal)
[WeChat Neural Machine Translation Systems for WMT21](https://aclanthology.org/2021.wmt-1.23) (Zeng et al., WMT 2021)
ACL
- Xianfeng Zeng, Yijin Liu, Ernan Li, Qiu Ran, Fandong Meng, Peng Li, Jinan Xu, and Jie Zhou. 2021. WeChat Neural Machine Translation Systems for WMT21. In Proceedings of the Sixth Conference on Machine Translation, pages 243–254, Online. Association for Computational Linguistics.