@inproceedings{wang-etal-2020-building,
title = "Building a Bridge: A Method for Image-Text Sarcasm Detection Without Pretraining on Image-Text Data",
author = "Wang, Xinyu and
Sun, Xiaowen and
Yang, Tan and
Wang, Hongbo",
editor = "Castellucci, Giuseppe and
Filice, Simone and
Poria, Soujanya and
Cambria, Erik and
Specia, Lucia",
booktitle = "Proceedings of the First International Workshop on Natural Language Processing Beyond Text",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2020.nlpbt-1.3/",
doi = "10.18653/v1/2020.nlpbt-1.3",
pages = "19--29",
abstract = "Sarcasm detection in social media with text and image is becoming more challenging. Previous works of image-text sarcasm detection were mainly to fuse the summaries of text and image: different sub-models read the text and image respectively to get the summaries, and fuses the summaries. Recently, some multi-modal models based on the architecture of BERT are proposed such as ViLBERT. However, they can only be pretrained on the image-text data. In this paper, we propose an image-text model for sarcasm detection using the pretrained BERT and ResNet without any further pretraining. BERT and ResNet have been pretrained on much larger text or image data than image-text data. We connect the vector spaces of BERT and ResNet to utilize more data. We use the pretrained Multi-Head Attention of BERT to model the text and image. Besides, we propose a 2D-Intra-Attention to extract the relationships between words and images. In experiments, our model outperforms the state-of-the-art model."
}
Markdown (Informal)
[Building a Bridge: A Method for Image-Text Sarcasm Detection Without Pretraining on Image-Text Data](https://aclanthology.org/2020.nlpbt-1.3/) (Wang et al., nlpbt 2020)