@inproceedings{takebayashi-etal-2018-word,
    title     = {Word Rewarding for Adequate Neural Machine Translation},
    author    = {Takebayashi, Yuto and
                 Chu, Chenhui and
                 Arase, Yuki and
                 Nagata, Masaaki},
    booktitle = {Proceedings of the 15th International Conference on Spoken Language Translation},
    month     = oct,
    year      = {2018},
    address   = {Brussels},
    publisher = {International Conference on Spoken Language Translation},
    url       = {https://aclanthology.org/2018.iwslt-1.3},
    pages     = {14--22},
    abstract  = {To improve the translation adequacy in neural machine translation (NMT), we propose a rewarding model with target word prediction using bilingual dictionaries inspired by the success of decoder constraints in statistical machine translation. In particular, the model first predicts a set of target words promising for translation; then boosts the probabilities of the predicted words to give them better chances to be output. Our rewarding model minimally interacts with the decoder so that it can be easily applied to the decoder of an existing NMT system. Extensive evaluation under both resource-rich and resource-poor settings shows that (1) BLEU score improves more than 10 points with oracle prediction, (2) BLEU score improves about 1.0 point with target word prediction using bilingual dictionaries created either manually or automatically, (3) hyper-parameters of our model are relatively easy to optimize, and (4) undergeneration problem can be alleviated in exchange for increasing over-generated words.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="takebayashi-etal-2018-word">
<titleInfo>
<title>Word Rewarding for Adequate Neural Machine Translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yuto</namePart>
<namePart type="family">Takebayashi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chenhui</namePart>
<namePart type="family">Chu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yuki</namePart>
<namePart type="family">Arase</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Masaaki</namePart>
<namePart type="family">Nagata</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 15th International Conference on Spoken Language Translation</title>
</titleInfo>
<originInfo>
<publisher>International Conference on Spoken Language Translation</publisher>
<place>
<placeTerm type="text">Brussels</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>To improve the translation adequacy in neural machine translation (NMT), we propose a rewarding model with target word prediction using bilingual dictionaries inspired by the success of decoder constraints in statistical machine translation. In particular, the model first predicts a set of target words promising for translation; then boosts the probabilities of the predicted words to give them better chances to be output. Our rewarding model minimally interacts with the decoder so that it can be easily applied to the decoder of an existing NMT system. Extensive evaluation under both resource-rich and resource-poor settings shows that (1) BLEU score improves more than 10 points with oracle prediction, (2) BLEU score improves about 1.0 point with target word prediction using bilingual dictionaries created either manually or automatically, (3) hyper-parameters of our model are relatively easy to optimize, and (4) undergeneration problem can be alleviated in exchange for increasing over-generated words.</abstract>
<identifier type="citekey">takebayashi-etal-2018-word</identifier>
<location>
<url>https://aclanthology.org/2018.iwslt-1.3</url>
</location>
<part>
<date>2018-10</date>
<extent unit="page">
<start>14</start>
<end>22</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Word Rewarding for Adequate Neural Machine Translation
%A Takebayashi, Yuto
%A Chu, Chenhui
%A Arase, Yuki
%A Nagata, Masaaki
%S Proceedings of the 15th International Conference on Spoken Language Translation
%D 2018
%8 October 29-30
%I International Conference on Spoken Language Translation
%C Brussels
%F takebayashi-etal-2018-word
%X To improve the translation adequacy in neural machine translation (NMT), we propose a rewarding model with target word prediction using bilingual dictionaries inspired by the success of decoder constraints in statistical machine translation. In particular, the model first predicts a set of target words promising for translation; then boosts the probabilities of the predicted words to give them better chances to be output. Our rewarding model minimally interacts with the decoder so that it can be easily applied to the decoder of an existing NMT system. Extensive evaluation under both resource-rich and resource-poor settings shows that (1) BLEU score improves more than 10 points with oracle prediction, (2) BLEU score improves about 1.0 point with target word prediction using bilingual dictionaries created either manually or automatically, (3) hyper-parameters of our model are relatively easy to optimize, and (4) undergeneration problem can be alleviated in exchange for increasing over-generated words.
%U https://aclanthology.org/2018.iwslt-1.3
%P 14-22
Markdown (Informal)
[Word Rewarding for Adequate Neural Machine Translation](https://aclanthology.org/2018.iwslt-1.3) (Takebayashi et al., IWSLT 2018)
ACL
- Yuto Takebayashi, Chenhui Chu, Yuki Arase, and Masaaki Nagata. 2018. Word Rewarding for Adequate Neural Machine Translation. In Proceedings of the 15th International Conference on Spoken Language Translation, pages 14–22, Brussels. International Conference on Spoken Language Translation.