@inproceedings{qin-etal-2023-cliptext,
title = "{CLIPT}ext: A New Paradigm for Zero-shot Text Classification",
author = "Qin, Libo and
Wang, Weiyun and
Chen, Qiguang and
Che, Wanxiang",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-acl.69/",
doi = "10.18653/v1/2023.findings-acl.69",
pages = "1077--1088",
    abstract = "While CLIP models are useful for zero-shot vision-and-language (VL) tasks and computer vision tasks, little attention has been paid to applying CLIP to language tasks. Intuitively, CLIP models have rich representations pre-trained with natural language supervision, which we argue are useful for language tasks. Hence, this work bridges this gap by investigating a CLIP model for zero-shot text classification. Specifically, we introduce CLIPText, a novel paradigm for zero-shot text classification, which reformulates zero-shot text classification as a text-image matching problem to which CLIP can be applied. In addition, we further incorporate prompts into CLIPText (Prompt-CLIPText) to better derive knowledge from CLIP. Experimental results on seven publicly available zero-shot text classification datasets show that both CLIPText and Prompt-CLIPText attain promising performance. Moreover, extensive analysis further verifies that knowledge from CLIP can benefit the zero-shot text classification task. We hope this work can inspire more breakthroughs in applying VL pre-trained models to language tasks."
}
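The abstract frames zero-shot text classification as a matching problem that CLIP can score. As a rough illustration of that idea, the minimal sketch below scores an input text against candidate label prompts with CLIP's text encoder via Hugging Face transformers. It is not the authors' exact pipeline: the paper casts the task as text-image matching, while this sketch uses the text tower on both sides for brevity, and the checkpoint name, label set, and prompt template are illustrative assumptions.

```python
# Minimal sketch of zero-shot classification via CLIP matching.
# Assumptions: Hugging Face `transformers` with the public
# "openai/clip-vit-base-patch32" checkpoint; label set and prompt
# template are hypothetical. The paper's CLIPText formulation is
# text-IMAGE matching; this sketch scores both sides with the text encoder.
import torch
from transformers import CLIPTokenizer, CLIPTextModelWithProjection

name = "openai/clip-vit-base-patch32"
tokenizer = CLIPTokenizer.from_pretrained(name)
encoder = CLIPTextModelWithProjection.from_pretrained(name).eval()

def embed(texts):
    # CLIP's text tower is capped at 77 tokens, so truncate long inputs.
    batch = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        embeds = encoder(**batch).text_embeds
    # Unit-normalize so dot products are cosine similarities.
    return embeds / embeds.norm(dim=-1, keepdim=True)

labels = ["sports", "business", "technology"]        # hypothetical label set
prompts = [f"A photo of {l} news." for l in labels]  # hypothetical prompt template

doc = "The team clinched the title with a stoppage-time goal."
scores = embed([doc]) @ embed(prompts).T             # cosine similarity per label
print(labels[scores.argmax().item()])                # -> "sports"
```

Prompting the labels (rather than encoding the bare label words) mirrors the abstract's Prompt-CLIPText variant: a natural-language template moves the label text closer to the caption-style supervision CLIP was pre-trained on.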