@inproceedings{parida-bojar-2018-translating,
title = "Translating Short Segments with {NMT}: A Case Study in {E}nglish-to-{H}indi",
author = "Parida, Shantipriya and
Bojar, Ond{\v{r}}ej",
editor = "P{\'e}rez-Ortiz, Juan Antonio and
S{\'a}nchez-Mart{\'i}nez, Felipe and
Espl{\`a}-Gomis, Miquel and
Popovi{\'c}, Maja and
Rico, Celia and
Martins, Andr{\'e} and
Van den Bogaert, Joachim and
Forcada, Mikel L.",
booktitle = "Proceedings of the 21st Annual Conference of the European Association for Machine Translation",
month = may,
year = "2018",
address = "Alicante, Spain",
url = "https://preview.aclanthology.org/fix-sig-urls/2018.eamt-main.23/",
pages = "249--258",
abstract = "This paper presents a case study in translating short image captions of the Visual Genome dataset from English into Hindi using out-of-domain data sets of varying size. We experiment with three NMT models: the shallow and deep sequence-tosequence and the Transformer model as implemented in Marian toolkit. Phrase-based Moses serves as the baseline. The results indicate that the Transformer model outperforms others in the large data setting in a number of automatic metrics and manual evaluation, and it also produces the fewest truncated sentences. Transformer training is however very sensitive to the hyperparameters, so it requires more experimenting. The deep sequence-to-sequence model produced more flawless outputs in the small data setting and it was generally more stable, at the cost of more training iterations."
}