@inproceedings{cheng-zhang-2022-con,
    title = "Con-{NAT}: Contrastive Non-autoregressive Neural Machine Translation",
    author = "Cheng, Hao and
      Zhang, Zhihua",
    editor = "Goldberg, Yoav and
      Kozareva, Zornitsa and
      Zhang, Yue",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-emnlp.463/",
    doi = "10.18653/v1/2022.findings-emnlp.463",
    pages = "6219--6231",
    abstract = "Inspired by the success of contrastive learning in natural language processing, we incorporate contrastive learning into the conditional masked language model which is extensively used in non-autoregressive neural machine translation (NAT). Accordingly, we propose a Contrastive Non-autoregressive Neural Machine Translation (Con-NAT) model. Con-NAT optimizes the similarity of several different representations of the same token in the same sentence. We propose two methods to obtain various representations: Contrastive Common Mask and Contrastive Dropout. Positive pairs are various different representations of the same token, while negative pairs are representations of different tokens. In the feature space, the model with contrastive loss pulls positive pairs together and pushes negative pairs away. We conduct extensive experiments on six translation directions with different data sizes. The results demonstrate that Con-NAT showed a consistent and significant improvement in fully and iterative NAT. Con-NAT is state-of-the-art on WMT{'}16 Ro-En (34.18 BLEU)."
}
Markdown (Informal)
[Con-NAT: Contrastive Non-autoregressive Neural Machine Translation](https://aclanthology.org/2022.findings-emnlp.463/) (Cheng & Zhang, Findings 2022)
ACL