@inproceedings{liu-etal-2024-investigating,
title = "Investigating and Mitigating Object Hallucinations in Pretrained Vision-Language ({CLIP}) Models",
author = "Liu, Yufang and
Ji, Tao and
Sun, Changzhi and
Wu, Yuanbin and
Zhou, Aimin",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.emnlp-main.1016/",
doi = "10.18653/v1/2024.emnlp-main.1016",
pages = "18288--18301",
abstract = "Large Vision-Language Models (LVLMs) have achieved impressive performance, yet research has pointed out a serious issue with object hallucinations within these models. However, there is no clear conclusion as to which part of the model these hallucinations originate from. In this paper, we present an in-depth investigation into the object hallucination problem specifically within the CLIP model, which serves as the backbone for many state-of-the-art vision-language systems. We unveil that even in isolation, the CLIP model is prone to object hallucinations, suggesting that the hallucination problem is not solely due to the interaction between vision and language modalities. To address this, we propose a counterfactual data augmentation method by creating negative samples with a variety of hallucination issues. We demonstrate that our method can effectively mitigate object hallucinations for CLIP model, and we show the the enhanced model can be employed as a visual encoder, effectively alleviating the object hallucination issue in LVLMs."
}