@inproceedings{li-etal-2021-unsupervised-neural,
    title = "Unsupervised Neural Machine Translation with {Universal Grammar}",
author = "Li, Zuchao and
Utiyama, Masao and
Sumita, Eiichiro and
Zhao, Hai",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.emnlp-main.261",
doi = "10.18653/v1/2021.emnlp-main.261",
pages = "3249--3264",
abstract = "Machine translation usually relies on parallel corpora to provide parallel signals for training. The advent of unsupervised machine translation has brought machine translation away from this reliance, though performance still lags behind traditional supervised machine translation. In unsupervised machine translation, the model seeks symmetric language similarities as a source of weak parallel signal to achieve translation. Chomsky{'}s Universal Grammar theory postulates that grammar is an innate form of knowledge to humans and is governed by universal principles and constraints. Therefore, in this paper, we seek to leverage such shared grammar clues to provide more explicit language parallel signals to enhance the training of unsupervised machine translation models. Through experiments on multiple typical language pairs, we demonstrate the effectiveness of our proposed approaches.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="li-etal-2021-unsupervised-neural">
<titleInfo>
<title>Unsupervised Neural Machine Translation with Universal Grammar</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zuchao</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Masao</namePart>
<namePart type="family">Utiyama</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eiichiro</namePart>
<namePart type="family">Sumita</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hai</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online and Punta Cana, Dominican Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Machine translation usually relies on parallel corpora to provide parallel signals for training. The advent of unsupervised machine translation has brought machine translation away from this reliance, though performance still lags behind traditional supervised machine translation. In unsupervised machine translation, the model seeks symmetric language similarities as a source of weak parallel signal to achieve translation. Chomsky’s Universal Grammar theory postulates that grammar is an innate form of knowledge to humans and is governed by universal principles and constraints. Therefore, in this paper, we seek to leverage such shared grammar clues to provide more explicit language parallel signals to enhance the training of unsupervised machine translation models. Through experiments on multiple typical language pairs, we demonstrate the effectiveness of our proposed approaches.</abstract>
<identifier type="citekey">li-etal-2021-unsupervised-neural</identifier>
<identifier type="doi">10.18653/v1/2021.emnlp-main.261</identifier>
<location>
<url>https://aclanthology.org/2021.emnlp-main.261</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>3249</start>
<end>3264</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Unsupervised Neural Machine Translation with Universal Grammar
%A Li, Zuchao
%A Utiyama, Masao
%A Sumita, Eiichiro
%A Zhao, Hai
%S Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing
%D 2021
%8 nov
%I Association for Computational Linguistics
%C Online and Punta Cana, Dominican Republic
%F li-etal-2021-unsupervised-neural
%X Machine translation usually relies on parallel corpora to provide parallel signals for training. The advent of unsupervised machine translation has brought machine translation away from this reliance, though performance still lags behind traditional supervised machine translation. In unsupervised machine translation, the model seeks symmetric language similarities as a source of weak parallel signal to achieve translation. Chomsky’s Universal Grammar theory postulates that grammar is an innate form of knowledge to humans and is governed by universal principles and constraints. Therefore, in this paper, we seek to leverage such shared grammar clues to provide more explicit language parallel signals to enhance the training of unsupervised machine translation models. Through experiments on multiple typical language pairs, we demonstrate the effectiveness of our proposed approaches.
%R 10.18653/v1/2021.emnlp-main.261
%U https://aclanthology.org/2021.emnlp-main.261
%U https://doi.org/10.18653/v1/2021.emnlp-main.261
%P 3249-3264
Markdown (Informal)
[Unsupervised Neural Machine Translation with Universal Grammar](https://aclanthology.org/2021.emnlp-main.261) (Li et al., EMNLP 2021)
ACL
- Zuchao Li, Masao Utiyama, Eiichiro Sumita, and Hai Zhao. 2021. Unsupervised Neural Machine Translation with Universal Grammar. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 3249–3264, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.