@inproceedings{song-etal-2022-clip,
title = "{CLIP} Models are Few-Shot Learners: Empirical Studies on {VQA} and Visual Entailment",
author = "Song, Haoyu and
Dong, Li and
Zhang, Weinan and
Liu, Ting and
Wei, Furu",
editor = "Muresan, Smaranda and
Nakov, Preslav and
Villavicencio, Aline",
booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.acl-long.421/",
doi = "10.18653/v1/2022.acl-long.421",
pages = "6088--6100",
    abstract = "CLIP has shown a remarkable zero-shot capability on a wide range of vision tasks. Previously, CLIP was regarded only as a powerful visual encoder. However, after being pre-trained with language supervision on a large number of image-caption pairs, CLIP itself should also have acquired some few-shot abilities for vision-language tasks. In this work, we empirically show that CLIP can be a strong vision-language few-shot learner by leveraging the power of language. We first evaluate CLIP's zero-shot performance on a typical visual question answering task and demonstrate a zero-shot cross-modality transfer capability of CLIP on the visual entailment task. Then we propose a parameter-efficient fine-tuning strategy to boost the few-shot performance on the VQA task. We achieve competitive zero/few-shot results on the visual question answering and visual entailment tasks without introducing any additional pre-training procedure."
}