@inproceedings{dobreva-keller-2021-investigating,
title = "Investigating Negation in Pre-trained Vision-and-language Models",
author = "Dobreva, Radina and
Keller, Frank",
editor = "Bastings, Jasmijn and
Belinkov, Yonatan and
Dupoux, Emmanuel and
Giulianelli, Mario and
Hupkes, Dieuwke and
Pinter, Yuval and
Sajjad, Hassan",
booktitle = "Proceedings of the Fourth BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2021.blackboxnlp-1.27/",
doi = "10.18653/v1/2021.blackboxnlp-1.27",
pages = "350--362",
abstract = "Pre-trained vision-and-language models have achieved impressive results on a variety of tasks, including ones that require complex reasoning beyond object recognition. However, little is known about how they achieve these results or what their limitations are. In this paper, we focus on a particular linguistic capability, namely the understanding of negation. We borrow techniques from the analysis of language models to investigate the ability of pre-trained vision-and-language models to handle negation. We find that these models severely underperform in the presence of negation."
}