@inproceedings{xiao-etal-2023-cfsum,
title = "{CFS}um Coarse-to-Fine Contribution Network for Multimodal Summarization",
author = "Xiao, Min and
Zhu, Junnan and
Lin, Haitao and
Zhou, Yu and
Zong, Chengqing",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.acl-long.476/",
doi = "10.18653/v1/2023.acl-long.476",
pages = "8538--8553",
abstract = "Multimodal summarization usually suffers from the problem that the contribution of the visual modality is unclear. Existing multimodal summarization approaches focus on designing the fusion methods of different modalities, while ignoring the adaptive conditions under which visual modalities are useful. Therefore, we propose a novel Coarse-to-Fine contribution network for multimodal Summarization (CFSum) to consider different contributions of images for summarization. First, to eliminate the interference of useless images, we propose a pre-filter module to abandon useless images. Second, to make accurate use of useful images, we propose two levels of visual complement modules, word level and phrase level. Specifically, image contributions are calculated and are adopted to guide the attention of both textual and visual modalities. Experimental results have shown that CFSum significantly outperforms multiple strong baselines on the standard benchmark. Furthermore, the analysis verifies that useful images can even help generate non-visual words which are implicitly represented in the image."
}
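
A minimal, hypothetical sketch of the contribution-guided fusion idea described in the abstract: an image contribution score is estimated and used to gate how much visual context complements the text, so useless images are effectively filtered out. Module names, dimensions, and the gating formulation are illustrative assumptions, not the authors' CFSum implementation.

```python
# Hedged sketch: contribution-weighted cross-modal attention (not the official CFSum code).
import torch
import torch.nn as nn


class ContributionGuidedAttention(nn.Module):
    def __init__(self, d_model: int = 512, num_heads: int = 8):
        super().__init__()
        # Scores how much the image should contribute (coarse pre-filter signal).
        self.contribution_scorer = nn.Linear(2 * d_model, 1)
        # Text tokens attend over image regions (word-level visual complement).
        self.cross_attn = nn.MultiheadAttention(d_model, num_heads, batch_first=True)

    def forward(self, text_feats: torch.Tensor, image_feats: torch.Tensor) -> torch.Tensor:
        # text_feats: (batch, n_tokens, d_model); image_feats: (batch, n_regions, d_model)
        visual_context, _ = self.cross_attn(text_feats, image_feats, image_feats)

        # Per-example contribution score from pooled text and image representations.
        pooled = torch.cat([text_feats.mean(dim=1), image_feats.mean(dim=1)], dim=-1)
        contribution = torch.sigmoid(self.contribution_scorer(pooled))  # (batch, 1)

        # Gate the visual context: a near-zero score effectively discards the image.
        return text_feats + contribution.unsqueeze(1) * visual_context


if __name__ == "__main__":
    model = ContributionGuidedAttention()
    text = torch.randn(2, 20, 512)   # dummy token features
    image = torch.randn(2, 49, 512)  # dummy image-region features
    print(model(text, image).shape)  # torch.Size([2, 20, 512])
```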