@inproceedings{cui-etal-2020-njus,
title = "{NJU}{'}s submission to the {WMT}20 {QE} Shared Task",
author = "Cui, Qu and
Geng, Xiang and
Huang, Shujian and
Chen, Jiajun",
booktitle = "Proceedings of the Fifth Conference on Machine Translation",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.wmt-1.115",
pages = "1004--1009",
abstract = "This paper describes our system of the sentence-level and word-level Quality Estimation Shared Task of WMT20. Our system is based on the QE Brain, and we simply enhance it by injecting noise at the target side. And to obtain the deep bi-directional information, we use a masked language model at the target side instead of two single directional decoders. Meanwhile, we try to use the extra QE data from the WMT17 and WMT19 to improve our system{'}s performance. Finally, we ensemble the features or the results from different models to get our best results. Our system finished fifth in the end at sentence-level on both EN-ZH and EN-DE language pairs.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="cui-etal-2020-njus">
<titleInfo>
<title>NJU’s submission to the WMT20 QE Shared Task</title>
</titleInfo>
<name type="personal">
<namePart type="given">Qu</namePart>
<namePart type="family">Cui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiang</namePart>
<namePart type="family">Geng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shujian</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jiajun</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-nov</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fifth Conference on Machine Translation</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper describes our system for the sentence-level and word-level Quality Estimation Shared Task of WMT20. Our system is based on QE Brain, which we enhance by injecting noise on the target side. To obtain deep bidirectional information, we use a masked language model on the target side instead of two single-directional decoders. We also use extra QE data from WMT17 and WMT19 to improve the system’s performance. Finally, we ensemble features or results from different models to obtain our best results. Our system finished fifth at the sentence level on both the EN-ZH and EN-DE language pairs.</abstract>
<identifier type="citekey">cui-etal-2020-njus</identifier>
<location>
<url>https://aclanthology.org/2020.wmt-1.115</url>
</location>
<part>
<date>2020-nov</date>
<extent unit="page">
<start>1004</start>
<end>1009</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T NJU’s submission to the WMT20 QE Shared Task
%A Cui, Qu
%A Geng, Xiang
%A Huang, Shujian
%A Chen, Jiajun
%S Proceedings of the Fifth Conference on Machine Translation
%D 2020
%8 nov
%I Association for Computational Linguistics
%C Online
%F cui-etal-2020-njus
%X This paper describes our system for the sentence-level and word-level Quality Estimation Shared Task of WMT20. Our system is based on QE Brain, which we enhance by injecting noise on the target side. To obtain deep bidirectional information, we use a masked language model on the target side instead of two single-directional decoders. We also use extra QE data from WMT17 and WMT19 to improve the system’s performance. Finally, we ensemble features or results from different models to obtain our best results. Our system finished fifth at the sentence level on both the EN-ZH and EN-DE language pairs.
%U https://aclanthology.org/2020.wmt-1.115
%P 1004-1009
Markdown (Informal)
[NJU’s submission to the WMT20 QE Shared Task](https://aclanthology.org/2020.wmt-1.115) (Cui et al., WMT 2020)
ACL
Qu Cui, Xiang Geng, Shujian Huang, and Jiajun Chen. 2020. NJU’s submission to the WMT20 QE Shared Task. In Proceedings of the Fifth Conference on Machine Translation, pages 1004–1009, Online. Association for Computational Linguistics.