@inproceedings{zhao-etal-2021-tmeku,
title = "{TMEKU} System for the {WAT}2021 Multimodal Translation Task",
author = "Zhao, Yuting and
Komachi, Mamoru and
Kajiwara, Tomoyuki and
Chu, Chenhui",
editor = "Nakazawa, Toshiaki and
Nakayama, Hideki and
Goto, Isao and
Mino, Hideya and
Ding, Chenchen and
Dabre, Raj and
Kunchukuttan, Anoop and
Higashiyama, Shohei and
Manabe, Hiroshi and
Pa, Win Pa and
Parida, Shantipriya and
Bojar, Ond{\v{r}}ej and
Chu, Chenhui and
Eriguchi, Akiko and
Abe, Kaori and
Oda, Yusuke and
Sudoh, Katsuhito and
Kurohashi, Sadao and
Bhattacharyya, Pushpak",
booktitle = "Proceedings of the 8th Workshop on Asian Translation (WAT2021)",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Add-Cong-Liu-Florida-Atlantic-University-author-id/2021.wat-1.20/",
doi = "10.18653/v1/2021.wat-1.20",
pages = "174--180",
abstract = "We introduce our TMEKU system submitted to the English-Japanese Multimodal Translation Task for WAT 2021. We participated in the Flickr30kEnt-JP task and Ambiguous MSCOCO Multimodal task under the constrained condition using only the officially provided datasets. Our proposed system employs soft alignment of word-region for multimodal neural machine translation (MNMT). The experimental results evaluated on the BLEU metric provided by the WAT 2021 evaluation site show that the TMEKU system has achieved the best performance among all the participated systems. Further analysis of the case study demonstrates that leveraging word-region alignment between the textual and visual modalities is the key to performance enhancement in our TMEKU system, which leads to better visual information use."
}
Markdown (Informal)
[TMEKU System for the WAT2021 Multimodal Translation Task](https://aclanthology.org/2021.wat-1.20/) (Zhao et al., WAT 2021)