@inproceedings{cekinel-etal-2025-multimodal,
    title     = {Multimodal Fact-Checking with Vision Language Models: A Probing Classifier based Solution with Embedding Strategies},
    author    = {Cekinel, Recep Firat and
                 Karagoz, Pinar and
                 {\c{C}}{\"o}ltekin, {\c{C}}a{\u{g}}r{\i}},
    editor    = {Rambow, Owen and
                 Wanner, Leo and
                 Apidianaki, Marianna and
                 Al-Khalifa, Hend and
                 Di Eugenio, Barbara and
                 Schockaert, Steven},
    booktitle = {Proceedings of the 31st International Conference on Computational Linguistics},
    month     = jan,
    year      = {2025},
    address   = {Abu Dhabi, UAE},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.coling-main.310/},
    pages     = {4622--4633},
    abstract  = {This study evaluates the effectiveness of Vision Language Models (VLMs) in representing and utilizing multimodal content for fact-checking. To be more specific, we investigate whether incorporating multimodal content improves performance compared to text-only models and how well VLMs utilize text and image information to enhance misinformation detection. Furthermore we propose a probing classifier based solution using VLMs. Our approach extracts embeddings from the last hidden layer of selected VLMs and inputs them into a neural probing classifier for multi-class veracity classification. Through a series of experiments on two fact-checking datasets, we demonstrate that while multimodality can enhance performance, fusing separate embeddings from text and image encoders yielded superior results compared to using VLM embeddings. Furthermore, the proposed neural classifier significantly outperformed KNN and SVM baselines in leveraging extracted embeddings, highlighting its effectiveness for multimodal fact-checking.}
}
Markdown (Informal)
[Multimodal Fact-Checking with Vision Language Models: A Probing Classifier based Solution with Embedding Strategies](https://aclanthology.org/2025.coling-main.310/) (Cekinel et al., COLING 2025)
ACL