@inproceedings{wazni-etal-2024-verbclip,
title = "{V}erb{CLIP}: Improving Verb Understanding in Vision-Language Models with Compositional Structures",
author = "Wazni, Hadi and
Lo, Kin Ian and
Sadrzadeh, Mehrnoosh",
editor = "Gu, Jing and
Fu, Tsu-Jui (Ray) and
Hudson, Drew and
Celikyilmaz, Asli and
Wang, William",
booktitle = "Proceedings of the 3rd Workshop on Advances in Language and Vision Research (ALVR)",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2024.alvr-1.17/",
doi = "10.18653/v1/2024.alvr-1.17",
pages = "195--201",
abstract = "Verbs describe the dynamics of interactions between people, objects, and their environments. They play a crucial role in language formation and understanding. Nonetheless, recent vision-language models like CLIP predominantly rely on nouns and have a limited account of verbs. This limitation affects their performance in tasks requiring action recognition and scene understanding. In this work, we introduce VerbCLIP, a verb-centric vision-language model which learns meanings of verbs based on a compositional approach to statistical machine learning. Our methods significantly outperform CLIP in zero-shot performance on the VALSE, VL-Checklist, and SVO-Probes datasets, with improvements of +2.38{\%}, +3.14{\%}, and +1.47{\%}, without fine-tuning. Fine-tuning resulted in further improvements, with gains of +2.85{\%} and +9.2{\%} on the VALSE and VL-Checklist datasets."
}
Markdown (Informal)
[VerbCLIP: Improving Verb Understanding in Vision-Language Models with Compositional Structures](https://aclanthology.org/2024.alvr-1.17/) (Wazni et al., ALVR 2024)
ACL
Hadi Wazni, Kin Ian Lo, and Mehrnoosh Sadrzadeh. 2024. [VerbCLIP: Improving Verb Understanding in Vision-Language Models with Compositional Structures](https://aclanthology.org/2024.alvr-1.17/). In Proceedings of the 3rd Workshop on Advances in Language and Vision Research (ALVR), pages 195–201, Bangkok, Thailand. Association for Computational Linguistics.