@inproceedings{liao-etal-2020-multi,
  title     = {Multi-Agent Mutual Learning at Sentence-Level and Token-Level for Neural Machine Translation},
  author    = {Liao, Baohao and
               Gao, Yingbo and
               Ney, Hermann},
  editor    = {Cohn, Trevor and
               He, Yulan and
               Liu, Yang},
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2020},
  month     = nov,
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2020.findings-emnlp.155/},
  doi       = {10.18653/v1/2020.findings-emnlp.155},
  pages     = {1715--1724},
  abstract  = {Mutual learning, where multiple agents learn collaboratively and teach one another, has been shown to be an effective way to distill knowledge for image classification tasks. In this paper, we extend mutual learning to the machine translation task and operate at both the sentence-level and the token-level. Firstly, we co-train multiple agents by using the same parallel corpora. After convergence, each agent selects and learns its poorly predicted tokens from other agents. The poorly predicted tokens are determined by the acceptance-rejection sampling algorithm. Our experiments show that sequential mutual learning at the sentence-level and the token-level improves the results cumulatively. Absolute improvements compared to strong baselines are obtained on various translation tasks. On the {IWSLT}'14 German-English task, we get a new state-of-the-art BLEU score of 37.0. We also report a competitive result, 29.9 BLEU score, on the {WMT}'14 English-German task.},
}
Markdown (Informal)
[Multi-Agent Mutual Learning at Sentence-Level and Token-Level for Neural Machine Translation](https://aclanthology.org/2020.findings-emnlp.155/) (Liao et al., Findings of EMNLP 2020)
ACL