@inproceedings{cao-etal-2024-introducing,
title = "Introducing {G}en{C}eption for Multimodal {LLM} Benchmarking: You May Bypass Annotations",
author = "Cao, Lele and
Buchner, Valentin and
Senane, Zineb and
Yang, Fangkai",
editor = "Ovalle, Anaelia and
Chang, Kai-Wei and
Cao, Yang Trista and
Mehrabi, Ninareh and
Zhao, Jieyu and
Galstyan, Aram and
Dhamala, Jwala and
Kumar, Anoop and
Gupta, Rahul",
booktitle = "Proceedings of the 4th Workshop on Trustworthy Natural Language Processing (TrustNLP 2024)",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.trustnlp-1.16/",
doi = "10.18653/v1/2024.trustnlp-1.16",
pages = "196--201",
abstract = "Multimodal Large Language Models (MLLMs) are commonly evaluated using costly annotated multimodal benchmarks. However, these benchmarks often struggle to keep pace with the rapidly advancing requirements of MLLM evaluation. We propose GenCeption, a novel and annotation-free MLLM evaluation framework that merely requires unimodal data to assess inter-modality semantic coherence and inversely reflects the models' inclination to hallucinate. Analogous to the popular DrawCeption game, GenCeption initiates with a non-textual sample and undergoes a series of iterative description and generation steps. Semantic drift across iterations is quantified using the GC@T metric. Our empirical findings validate GenCeption{'}s efficacy, showing strong correlations with popular MLLM benchmarking results. GenCeption may be extended to mitigate training data contamination by utilizing ubiquitous, previously unseen unimodal data."
}