@inproceedings{zhou-long-2023-multimodal,
  title     = {Multimodal Event Transformer for Image-guided Story Ending Generation},
  author    = {Zhou, Yucheng and Long, Guodong},
  editor    = {Vlachos, Andreas and Augenstein, Isabelle},
  booktitle = {Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics},
  month     = may,
  year      = {2023},
  address   = {Dubrovnik, Croatia},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.eacl-main.249/},
  doi       = {10.18653/v1/2023.eacl-main.249},
  pages     = {3434--3444},
  abstract  = {Image-guided story ending generation (IgSEG) is to generate a story ending based on given story plots and ending image. Existing methods focus on cross-modal feature fusion but overlook reasoning and mining implicit information from story plots and ending image. To tackle this drawback, we propose a multimodal event transformer, an event-based reasoning framework for IgSEG. Specifically, we construct visual and semantic event graphs from story plots and ending image, and leverage event-based reasoning to reason and mine implicit information in a single modality. Next, we connect visual and semantic event graphs and utilize cross-modal fusion to integrate different-modality features. In addition, we propose a multimodal injector to adaptive pass essential information to decoder. Besides, we present an incoherence detection to enhance the understanding context of a story plot and the robustness of graph modeling for our model. Experimental results show that our method achieves state-of-the-art performance for the image-guided story ending generation.}
}
Markdown (Informal)
[Multimodal Event Transformer for Image-guided Story Ending Generation](https://aclanthology.org/2023.eacl-main.249/) (Zhou & Long, EACL 2023)
ACL