Review notes (text outside entries is ignored by BibTeX): switched field
delimiters from quotes to braces (braces nest; quotes break on embedded
quotes); brace-protected the stylized LANTERN acronym words in booktitle so
a recasing style cannot destroy the deliberate capitalization; aligned
fields. month stays a bare macro, doi stays a bare DOI (no resolver prefix),
pages keep the double-hyphen range. Citation key, entry type, and all field
values are semantically unchanged.
@inproceedings{mohammadshahi-etal-2019-aligning,
    title     = {Aligning Multilingual Word Embeddings for Cross-Modal Retrieval Task},
    author    = {Mohammadshahi, Alireza and
                 Lebret, R{\'e}mi and
                 Aberer, Karl},
    editor    = {Mogadala, Aditya and
                 Klakow, Dietrich and
                 Pezzelle, Sandro and
                 Moens, Marie-Francine},
    booktitle = {Proceedings of the Beyond Vision and {LANguage}: {inTEgrating} Real-world {kNowledge} ({LANTERN})},
    month     = nov,
    year      = {2019},
    address   = {Hong Kong, China},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/D19-6402},
    doi       = {10.18653/v1/D19-6402},
    pages     = {11--17},
    abstract  = {In this paper, we propose a new approach to learn multimodal multilingual embeddings for matching images and their relevant captions in two languages. We combine two existing objective functions to make images and captions close in a joint embedding space while adapting the alignment of word embeddings between existing languages in our model. We show that our approach enables better generalization, achieving state-of-the-art performance in text-to-image and image-to-text retrieval task, and caption-caption similarity task. Two multimodal multilingual datasets are used for evaluation: Multi30k with German and English captions and Microsoft-COCO with English and Japanese captions.},
}
@comment{
Markdown (Informal)
[Aligning Multilingual Word Embeddings for Cross-Modal Retrieval Task](https://aclanthology.org/D19-6402) (Mohammadshahi et al., 2019)
ACL
}