@inproceedings{baruah-etal-2020-nlprl,
title = "{NLPRL} System for Very Low Resource Supervised Machine Translation",
author = "Baruah, Rupjyoti and
Mundotiya, Rajesh Kumar and
Kumar, Amit and
Singh, Anil Kumar",
booktitle = "Proceedings of the Fifth Conference on Machine Translation",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.wmt-1.126",
pages = "1075--1078",
abstract = "This paper describes the results of the system that we used for the WMT20 very low resource (VLR) supervised MT shared task. For our experiments, we use a byte-level version of BPE, which requires a base vocabulary of only 256 symbols. BPE-based models are a kind of sub-word model. Such models try to address the Out-of-Vocabulary (OOV) word problem by performing word segmentation so that segments correspond to morphological units. They are also reported to work across different languages, especially similar languages, due to their sub-word nature. Based on the cased BLEU score, our NLPRL systems ranked ninth in the HSB to GER and tenth in the GER to HSB translation scenario.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="baruah-etal-2020-nlprl">
    <titleInfo>
      <title>NLPRL System for Very Low Resource Supervised Machine Translation</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Rupjyoti</namePart>
      <namePart type="family">Baruah</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Rajesh</namePart>
      <namePart type="given">Kumar</namePart>
      <namePart type="family">Mundotiya</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Amit</namePart>
      <namePart type="family">Kumar</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Anil</namePart>
<namePart type="given">kumar</namePart>
<namePart type="family">Singh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-nov</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fifth Conference on Machine Translation</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>This paper describes the results of the system that we used for the WMT20 very low resource (VLR) supervised MT shared task. For our experiments, we use a byte-level version of BPE, which requires a base vocabulary of only 256 symbols. BPE-based models are a kind of sub-word model. Such models try to address the Out-of-Vocabulary (OOV) word problem by performing word segmentation so that segments correspond to morphological units. They are also reported to work across different languages, especially similar languages, due to their sub-word nature. Based on the cased BLEU score, our NLPRL systems ranked ninth in the HSB to GER and tenth in the GER to HSB translation scenario.</abstract>
    <identifier type="citekey">baruah-etal-2020-nlprl</identifier>
    <location>
      <url>https://aclanthology.org/2020.wmt-1.126</url>
    </location>
    <part>
      <date>2020-nov</date>
      <extent unit="page">
        <start>1075</start>
        <end>1078</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T NLPRL System for Very Low Resource Supervised Machine Translation
%A Baruah, Rupjyoti
%A Mundotiya, Rajesh Kumar
%A Kumar, Amit
%A Singh, Anil Kumar
%S Proceedings of the Fifth Conference on Machine Translation
%D 2020
%8 nov
%I Association for Computational Linguistics
%C Online
%F baruah-etal-2020-nlprl
%X This paper describes the results of the system that we used for the WMT20 very low resource (VLR) supervised MT shared task. For our experiments, we use a byte-level version of BPE, which requires a base vocabulary of only 256 symbols. BPE-based models are a kind of sub-word model. Such models try to address the Out-of-Vocabulary (OOV) word problem by performing word segmentation so that segments correspond to morphological units. They are also reported to work across different languages, especially similar languages, due to their sub-word nature. Based on the cased BLEU score, our NLPRL systems ranked ninth in the HSB to GER and tenth in the GER to HSB translation scenario.
%U https://aclanthology.org/2020.wmt-1.126
%P 1075-1078
Markdown (Informal)
[NLPRL System for Very Low Resource Supervised Machine Translation](https://aclanthology.org/2020.wmt-1.126) (Baruah et al., WMT 2020)
ACL
Rupjyoti Baruah, Rajesh Kumar Mundotiya, Amit Kumar, and Anil Kumar Singh. 2020. NLPRL System for Very Low Resource Supervised Machine Translation. In Proceedings of the Fifth Conference on Machine Translation, pages 1075–1078, Online. Association for Computational Linguistics.
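
Note: the abstract above mentions byte-level BPE, whose base vocabulary is the 256 possible byte values, so any input can be segmented without OOV tokens. The sketch below is a minimal illustration of that idea using the HuggingFace `tokenizers` library; it is not the authors' code, and the corpus, vocabulary size, and example sentences are placeholders, not the paper's settings or data.

```python
# Minimal byte-level BPE sketch (illustrative only; not from the paper).
from tokenizers import ByteLevelBPETokenizer

# Placeholder sentences standing in for the HSB-GER task data (illustrative).
corpus = [
    "Hornjoserbšćina je zapadosłowjanska rěč.",       # Upper Sorbian (HSB)
    "Obersorbisch ist eine westslawische Sprache.",   # German (GER)
]

# The base vocabulary consists of the 256 byte values; merges are learned on top.
tokenizer = ByteLevelBPETokenizer()
# vocab_size and min_frequency are assumed hyperparameters, not the paper's values.
tokenizer.train_from_iterator(corpus, vocab_size=8000, min_frequency=1)

# Any string, including unseen words, segments into sub-word or byte-level pieces.
print(tokenizer.encode("zapadosłowjanskich").tokens)
```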