@inproceedings{junczys-dowmunt-2018-dual,
    title     = {Dual Conditional Cross-Entropy Filtering of Noisy Parallel Corpora},
    author    = {Junczys-Dowmunt, Marcin},
    booktitle = {Proceedings of the Third Conference on Machine Translation: Shared Task Papers},
    month     = oct,
    year      = {2018},
    address   = {Brussels, Belgium},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/W18-6478},
    doi       = {10.18653/v1/W18-6478},
    pages     = {888--895},
    abstract  = {In this work we introduce dual conditional cross-entropy filtering for noisy parallel data. For each sentence pair of the noisy parallel corpus we compute cross-entropy scores according to two inverse translation models trained on clean data. We penalize divergent cross-entropies and weigh the penalty by the cross-entropy average of both models. Sorting or thresholding according to these scores results in better subsets of parallel data. We achieve higher BLEU scores with models trained on parallel data filtered only from Paracrawl than with models trained on clean WMT data. We further evaluate our method in the context of the WMT2018 shared task on parallel corpus filtering and achieve the overall highest ranking scores of the shared task, scoring top in three out of four subtasks.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="junczys-dowmunt-2018-dual">
<titleInfo>
<title>Dual Conditional Cross-Entropy Filtering of Noisy Parallel Corpora</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marcin</namePart>
<namePart type="family">Junczys-Dowmunt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued encoding="w3cdtf">2018-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Third Conference on Machine Translation: Shared Task Papers</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Brussels, Belgium</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this work we introduce dual conditional cross-entropy filtering for noisy parallel data. For each sentence pair of the noisy parallel corpus we compute cross-entropy scores according to two inverse translation models trained on clean data. We penalize divergent cross-entropies and weigh the penalty by the cross-entropy average of both models. Sorting or thresholding according to these scores results in better subsets of parallel data. We achieve higher BLEU scores with models trained on parallel data filtered only from Paracrawl than with models trained on clean WMT data. We further evaluate our method in the context of the WMT2018 shared task on parallel corpus filtering and achieve the overall highest ranking scores of the shared task, scoring top in three out of four subtasks.</abstract>
<identifier type="citekey">junczys-dowmunt-2018-dual</identifier>
<identifier type="doi">10.18653/v1/W18-6478</identifier>
<location>
<url>https://aclanthology.org/W18-6478</url>
</location>
<part>
<date>2018-10</date>
<extent unit="page">
<start>888</start>
<end>895</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Dual Conditional Cross-Entropy Filtering of Noisy Parallel Corpora
%A Junczys-Dowmunt, Marcin
%S Proceedings of the Third Conference on Machine Translation: Shared Task Papers
%D 2018
%8 oct
%I Association for Computational Linguistics
%C Brussels, Belgium
%F junczys-dowmunt-2018-dual
%X In this work we introduce dual conditional cross-entropy filtering for noisy parallel data. For each sentence pair of the noisy parallel corpus we compute cross-entropy scores according to two inverse translation models trained on clean data. We penalize divergent cross-entropies and weigh the penalty by the cross-entropy average of both models. Sorting or thresholding according to these scores results in better subsets of parallel data. We achieve higher BLEU scores with models trained on parallel data filtered only from Paracrawl than with models trained on clean WMT data. We further evaluate our method in the context of the WMT2018 shared task on parallel corpus filtering and achieve the overall highest ranking scores of the shared task, scoring top in three out of four subtasks.
%R 10.18653/v1/W18-6478
%U https://aclanthology.org/W18-6478
%U https://doi.org/10.18653/v1/W18-6478
%P 888-895
Markdown (Informal)
[Dual Conditional Cross-Entropy Filtering of Noisy Parallel Corpora](https://aclanthology.org/W18-6478) (Junczys-Dowmunt, 2018)
ACL