@inproceedings{jin-etal-2020-dual,
title = "Dual Low-Rank Multimodal Fusion",
author = "Jin, Tao and
Huang, Siyu and
Li, Yingming and
Zhang, Zhongfei",
editor = "Cohn, Trevor and
He, Yulan and
Liu, Yang",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2020.findings-emnlp.35/",
doi = "10.18653/v1/2020.findings-emnlp.35",
pages = "377--387",
abstract = "Tensor-based fusion methods have been proven effective in multimodal fusion tasks. However, existing tensor-based methods make a poor use of the fine-grained temporal dynamics of multimodal sequential features. Motivated by this observation, this paper proposes a novel multimodal fusion method called Fine-Grained Temporal Low-Rank Multimodal Fusion (FT-LMF). FT-LMF correlates the features of individual time steps between multiple modalities, while it involves multiplications of high-order tensors in its calculation. This paper further proposes Dual Low-Rank Multimodal Fusion (Dual-LMF) to reduce the computational complexity of FT-LMF through low-rank tensor approximation along dual dimensions of input features. Dual-LMF is conceptually simple and practically effective and efficient. Empirical studies on benchmark multimodal analysis tasks show that our proposed methods outperform the state-of-the-art tensor-based fusion methods with a similar computational complexity."
}
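
The abstract builds on low-rank tensor fusion. As a rough illustration of that general idea only (a hedged sketch, not the authors' FT-LMF or Dual-LMF implementation), the PyTorch snippet below shows LMF-style fusion: each modality's feature vector, appended with a constant 1, is projected through rank-many factor matrices, the projections are multiplied elementwise across modalities, and the rank factors are summed. The class name LowRankFusion and the feature sizes, output dimension, and rank are illustrative assumptions.

import torch
import torch.nn as nn

class LowRankFusion(nn.Module):
    # Hypothetical sketch of low-rank tensor fusion in the spirit of the
    # LMF-style methods this paper extends; not the authors' Dual-LMF code.
    def __init__(self, dims, out_dim, rank):
        super().__init__()
        # One factor tensor per modality: (rank, d_m + 1, out_dim);
        # the +1 accounts for the constant 1 appended to each feature vector.
        self.factors = nn.ParameterList(
            [nn.Parameter(0.1 * torch.randn(rank, d + 1, out_dim)) for d in dims]
        )

    def forward(self, inputs):
        # inputs: list of (batch, d_m) unimodal feature vectors, one per modality.
        fused = None
        for x, factor in zip(inputs, self.factors):
            ones = torch.ones(x.size(0), 1, device=x.device)
            x1 = torch.cat([x, ones], dim=-1)                # (batch, d_m + 1)
            proj = torch.einsum('bd,rdo->rbo', x1, factor)   # (rank, batch, out_dim)
            fused = proj if fused is None else fused * proj  # elementwise across modalities
        return fused.sum(dim=0)                              # sum over the rank factors

# Toy usage with made-up audio/visual/text feature sizes.
fusion = LowRankFusion(dims=[74, 35, 300], out_dim=64, rank=4)
h = fusion([torch.randn(8, 74), torch.randn(8, 35), torch.randn(8, 300)])
print(h.shape)  # torch.Size([8, 64])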