@inproceedings{wang-etal-2020-sparsity,
title = "On the Sparsity of Neural Machine Translation Models",
author = "Wang, Yong and
Wang, Longyue and
Li, Victor and
Tu, Zhaopeng",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.emnlp-main.78",
doi = "10.18653/v1/2020.emnlp-main.78",
pages = "1060--1066",
abstract = "Modern neural machine translation (NMT) models employ a large number of parameters, which leads to serious over-parameterization and typically causes the underutilization of computational resources. In response to this problem, we empirically investigate whether the redundant parameters can be reused to achieve better performance. Experiments and analyses are systematically conducted on different datasets and NMT architectures. We show that: 1) the pruned parameters can be rejuvenated to improve the baseline model by up to +0.8 BLEU points; 2) the rejuvenated parameters are reallocated to enhance the ability of modeling low-level lexical information.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wang-etal-2020-sparsity">
<titleInfo>
<title>On the Sparsity of Neural Machine Translation Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yong</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Longyue</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Victor</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhaopeng</namePart>
<namePart type="family">Tu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-nov</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Modern neural machine translation (NMT) models employ a large number of parameters, which leads to serious over-parameterization and typically causes the underutilization of computational resources. In response to this problem, we empirically investigate whether the redundant parameters can be reused to achieve better performance. Experiments and analyses are systematically conducted on different datasets and NMT architectures. We show that: 1) the pruned parameters can be rejuvenated to improve the baseline model by up to +0.8 BLEU points; 2) the rejuvenated parameters are reallocated to enhance the ability of modeling low-level lexical information.</abstract>
<identifier type="citekey">wang-etal-2020-sparsity</identifier>
<identifier type="doi">10.18653/v1/2020.emnlp-main.78</identifier>
<location>
<url>https://aclanthology.org/2020.emnlp-main.78</url>
</location>
<part>
<date>2020-nov</date>
<extent unit="page">
<start>1060</start>
<end>1066</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T On the Sparsity of Neural Machine Translation Models
%A Wang, Yong
%A Wang, Longyue
%A Li, Victor
%A Tu, Zhaopeng
%S Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)
%D 2020
%8 nov
%I Association for Computational Linguistics
%C Online
%F wang-etal-2020-sparsity
%X Modern neural machine translation (NMT) models employ a large number of parameters, which leads to serious over-parameterization and typically causes the underutilization of computational resources. In response to this problem, we empirically investigate whether the redundant parameters can be reused to achieve better performance. Experiments and analyses are systematically conducted on different datasets and NMT architectures. We show that: 1) the pruned parameters can be rejuvenated to improve the baseline model by up to +0.8 BLEU points; 2) the rejuvenated parameters are reallocated to enhance the ability of modeling low-level lexical information.
%R 10.18653/v1/2020.emnlp-main.78
%U https://aclanthology.org/2020.emnlp-main.78
%U https://doi.org/10.18653/v1/2020.emnlp-main.78
%P 1060-1066
Markdown (Informal)
[On the Sparsity of Neural Machine Translation Models](https://aclanthology.org/2020.emnlp-main.78) (Wang et al., EMNLP 2020)
ACL
Yong Wang, Longyue Wang, Victor Li, and Zhaopeng Tu. 2020. On the Sparsity of Neural Machine Translation Models. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1060–1066, Online. Association for Computational Linguistics.