@inproceedings{chung-yu-2023-vlis,
title = "{VLIS}: Unimodal Language Models Guide Multimodal Language Generation",
author = "Chung, Jiwan and
Yu, Youngjae",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.emnlp-main.46/",
doi = "10.18653/v1/2023.emnlp-main.46",
pages = "700--721",
abstract = "Multimodal language generation, which leverages the synergy of language and vision, is a rapidly expanding field. However, existing vision-language models face challenges in tasks that require complex linguistic understanding. To address this issue, we introduce Visual-Language models as Importance Sampling weights (VLIS), a novel framework that combines the visual conditioning capability of vision-language models with the language understanding of unimodal text-only language models without further training. It extracts pointwise mutual information of each image and text from a visual-language model and uses the value as an importance sampling weight to adjust the token likelihood from a text-only model. VLIS improves vision-language models on diverse tasks, including commonsense understanding (WHOOPS, OK-VQA, and ScienceQA) and complex text generation (Concadia, Image Paragraph Captioning, and ROCStories). Our results suggest that VLIS represents a promising new direction for multimodal language generation."
}
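
The abstract describes a training-free decoding rule: the pointwise mutual information (PMI) between the image and each candidate token, estimated from a vision-language model, reweights the next-token likelihood of a text-only language model. The sketch below illustrates that scoring step under assumed inputs; the function name `vlis_scores`, the `alpha` knob, and the toy log-probability arrays are illustrative assumptions, not the authors' released implementation.

```python
import numpy as np

def vlis_scores(logp_text, logp_vl_cond, logp_vl_uncond, alpha=1.0):
    """Hypothetical sketch of PMI-weighted next-token scoring.

    logp_text      : text-only LM log-probabilities over the vocabulary
    logp_vl_cond   : vision-language model log-probs conditioned on the image
    logp_vl_uncond : vision-language model log-probs without the image
    alpha          : assumed strength of the visual importance weight
    """
    # PMI(image; token) = log p_vl(token | image, ctx) - log p_vl(token | ctx)
    pmi = logp_vl_cond - logp_vl_uncond
    # Use the PMI as an importance-sampling-style weight on the text-only likelihood.
    return logp_text + alpha * pmi

# Toy example over a 4-token vocabulary (made-up numbers).
logp_text = np.log(np.array([0.5, 0.2, 0.2, 0.1]))
logp_vl_cond = np.log(np.array([0.1, 0.6, 0.2, 0.1]))
logp_vl_uncond = np.log(np.array([0.3, 0.3, 0.3, 0.1]))

scores = vlis_scores(logp_text, logp_vl_cond, logp_vl_uncond)
next_token = int(np.argmax(scores))  # token favored by both fluency and the image
print(scores, next_token)
```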