@inproceedings{senellart-etal-2018-opennmt,
title = "{O}pen{NMT} System Description for {WNMT} 2018: 800 words/sec on a single-core {CPU}",
author = "Senellart, Jean and
Zhang, Dakun and
Wang, Bo and
Klein, Guillaume and
Ramatchandirin, Jean-Pierre and
Crego, Josep and
Rush, Alexander",
booktitle = "Proceedings of the 2nd Workshop on Neural Machine Translation and Generation",
month = jul,
year = "2018",
address = "Melbourne, Australia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W18-2715",
doi = "10.18653/v1/W18-2715",
pages = "122--128",
abstract = "We present a system description of the OpenNMT Neural Machine Translation entry for the WNMT 2018 evaluation. In this work, we developed a heavily optimized NMT inference model targeting a high-performance CPU system. The final system uses a combination of four techniques, all of them lead to significant speed-ups in combination: (a) sequence distillation, (b) architecture modifications, (c) precomputation, particularly of vocabulary, and (d) CPU targeted quantization. This work achieves the fastest performance of the shared task, and led to the development of new features that have been integrated to OpenNMT and available to the community.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="senellart-etal-2018-opennmt">
<titleInfo>
<title>OpenNMT System Description for WNMT 2018: 800 words/sec on a single-core CPU</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jean</namePart>
<namePart type="family">Senellart</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dakun</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bo</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Guillaume</namePart>
<namePart type="family">Klein</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jean-Pierre</namePart>
<namePart type="family">Ramatchandirin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Josep</namePart>
<namePart type="family">Crego</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexander</namePart>
<namePart type="family">Rush</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2nd Workshop on Neural Machine Translation and Generation</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Melbourne, Australia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We present a system description of the OpenNMT Neural Machine Translation entry for the WNMT 2018 evaluation. In this work, we developed a heavily optimized NMT inference model targeting a high-performance CPU system. The final system uses a combination of four techniques, all of which lead to significant speed-ups in combination: (a) sequence distillation, (b) architecture modifications, (c) precomputation, particularly of vocabulary, and (d) CPU-targeted quantization. This work achieves the fastest performance of the shared task, and led to the development of new features that have been integrated into OpenNMT and made available to the community.</abstract>
<identifier type="citekey">senellart-etal-2018-opennmt</identifier>
<identifier type="doi">10.18653/v1/W18-2715</identifier>
<location>
<url>https://aclanthology.org/W18-2715</url>
</location>
<part>
<date>2018-07</date>
<extent unit="page">
<start>122</start>
<end>128</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T OpenNMT System Description for WNMT 2018: 800 words/sec on a single-core CPU
%A Senellart, Jean
%A Zhang, Dakun
%A Wang, Bo
%A Klein, Guillaume
%A Ramatchandirin, Jean-Pierre
%A Crego, Josep
%A Rush, Alexander
%S Proceedings of the 2nd Workshop on Neural Machine Translation and Generation
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne, Australia
%F senellart-etal-2018-opennmt
%X We present a system description of the OpenNMT Neural Machine Translation entry for the WNMT 2018 evaluation. In this work, we developed a heavily optimized NMT inference model targeting a high-performance CPU system. The final system uses a combination of four techniques, all of which lead to significant speed-ups in combination: (a) sequence distillation, (b) architecture modifications, (c) precomputation, particularly of vocabulary, and (d) CPU-targeted quantization. This work achieves the fastest performance of the shared task, and led to the development of new features that have been integrated into OpenNMT and made available to the community.
%R 10.18653/v1/W18-2715
%U https://aclanthology.org/W18-2715
%U https://doi.org/10.18653/v1/W18-2715
%P 122-128
Markdown (Informal)
[OpenNMT System Description for WNMT 2018: 800 words/sec on a single-core CPU](https://aclanthology.org/W18-2715) (Senellart et al., 2018)
ACL
Jean Senellart, Dakun Zhang, Bo Wang, Guillaume Klein, Jean-Pierre Ramatchandirin, Josep Crego, and Alexander Rush. 2018. OpenNMT System Description for WNMT 2018: 800 words/sec on a single-core CPU. In Proceedings of the 2nd Workshop on Neural Machine Translation and Generation, pages 122–128, Melbourne, Australia. Association for Computational Linguistics.