@inproceedings{wu-etal-2023-diffuvst,
  % Whole-word brace protection ({DiffuVST}) instead of mid-word single-letter
  % braces, which can break kerning/hyphenation under some styles.
  title     = {{DiffuVST}: Narrating Fictional Scenes with Global-History-Guided Denoising Models},
  author    = {Wu, Shengguang and
               Yuan, Mei and
               Su, Qi},
  editor    = {Bouamor, Houda and
               Pino, Juan and
               Bali, Kalika},
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2023},
  month     = dec,
  year      = {2023},
  address   = {Singapore},
  publisher = {Association for Computational Linguistics},
  % Canonical ACL Anthology URL; the previous value was a temporary
  % ingestion-preview mirror (preview.aclanthology.org/jlcl-multiple-ingestion/)
  % that is subject to link rot. The DOI below resolves to the same record.
  url       = {https://aclanthology.org/2023.findings-emnlp.126/},
  doi       = {10.18653/v1/2023.findings-emnlp.126},
  pages     = {1885--1896},
  abstract  = {Recent advances in image and video creation, especially AI-based image synthesis, have led to the production of numerous visual scenes that exhibit a high level of abstractness and diversity. Consequently, Visual Storytelling (VST), a task that involves generating meaningful and coherent narratives from a collection of images, has become even more challenging and is increasingly desired beyond real-world imagery. While existing VST techniques, which typically use autoregressive decoders, have made significant progress, they suffer from low inference speed and are not well-suited for synthetic scenes. To this end, we propose a novel diffusion-based system DiffuVST, which models the generation of a series of visual descriptions as a single conditional denoising process. The stochastic and non-autoregressive nature of DiffuVST at inference time allows it to generate highly diverse narratives more efficiently. In addition, DiffuVST features a unique design with bi-directional text history guidance and multimodal adapter modules, which effectively improve inter-sentence coherence and image-to-text fidelity. Extensive experiments on the story generation task covering four fictional visual-story datasets demonstrate the superiority of DiffuVST over traditional autoregressive models in terms of both text quality and inference speed.},
}
Markdown (Informal)
[DiffuVST: Narrating Fictional Scenes with Global-History-Guided Denoising Models](https://aclanthology.org/2023.findings-emnlp.126/) (Wu et al., Findings 2023)
ACL