@inproceedings{morency-baltrusaitis-2017-multimodal,
  title     = {Multimodal Machine Learning: Integrating Language, Vision and Speech},
  author    = {Morency, Louis-Philippe and
               Baltru{\v{s}}aitis, Tadas},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/P17-5002},
  pages     = {3--5},
  abstract  = {Multimodal machine learning is a vibrant multi-disciplinary research field which addresses some of the original goals of artificial intelligence by integrating and modeling multiple communicative modalities, including linguistic, acoustic and visual messages. With the initial research on audio-visual speech recognition and more recently with image and video captioning projects, this research field brings some unique challenges for multimodal researchers given the heterogeneity of the data and the contingency often found between modalities. This tutorial builds upon a recent course taught at Carnegie Mellon University during the Spring 2016 semester (CMU course 11-777) and two tutorials presented at CVPR 2016 and ICMI 2016. The present tutorial will review fundamental concepts of machine learning and deep neural networks before describing the five main challenges in multimodal machine learning: (1) multimodal representation learning, (2) translation {\&} mapping, (3) modality alignment, (4) multimodal fusion and (5) co-learning. The tutorial will also present state-of-the-art algorithms that were recently proposed to solve multimodal applications such as image captioning, video descriptions and visual question-answer. We will also discuss the current and upcoming challenges.},
}
Markdown (Informal)
[Multimodal Machine Learning: Integrating Language, Vision and Speech](https://aclanthology.org/P17-5002) (Morency & Baltrušaitis, ACL 2017)
ACL