@inproceedings{li-etal-2025-transferring,
  title     = {Transferring Textual Preferences to Vision-Language Understanding through Model Merging},
  author    = {Li, Chen-An and
               Lin, Tzu-Han and
               Chen, Yun-Nung and
               Lee, Hung-yi},
  editor    = {Che, Wanxiang and
               Nabende, Joyce and
               Shutova, Ekaterina and
               Pilehvar, Mohammad Taher},
  booktitle = {Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.acl-short.72/},
  pages     = {923--943},
  isbn      = {979-8-89176-252-7},
  abstract  = {Large vision-language models (LVLMs) perform outstandingly across various multimodal tasks. However, their ability to evaluate generated content remains limited, and training vision-language reward models (VLRMs) with preference data is computationally expensive. This paper explores a training-free alternative by merging text-based reward models (RMs) with LVLMs to create VLRMs. Our approach shows that integrating these models leads to improved performance over LVLMs' scoring and text-based RMs, offering an efficient method for incorporating textual preferences into LVLMs.},
}
@comment{
Markdown (Informal):
[Transferring Textual Preferences to Vision-Language Understanding through Model Merging](https://aclanthology.org/2025.acl-short.72/) (Li et al., ACL 2025)
}