@inproceedings{khlyzova-etal-2022-complementarity,
title = "On the Complementarity of Images and Text for the Expression of Emotions in Social Media",
author = "Khlyzova, Anna and
Silberer, Carina and
Klinger, Roman",
editor = "Barnes, Jeremy and
De Clercq, Orph{\'e}e and
Barriere, Valentin and
Tafreshi, Shabnam and
Alqahtani, Sawsan and
Sedoc, Jo{\~a}o and
Klinger, Roman and
Balahur, Alexandra",
booktitle = "Proceedings of the 12th Workshop on Computational Approaches to Subjectivity, Sentiment {\&} Social Media Analysis",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.wassa-1.1",
doi = "10.18653/v1/2022.wassa-1.1",
pages = "1--15",
abstract = "Authors of posts in social media communicate their emotions and what causes them with text and images. While there is work on emotion and stimulus detection for each modality separately, it is yet unknown if the modalities contain complementary emotion information in social media. We aim at filling this research gap and contribute a novel, annotated corpus of English multimodal Reddit posts. On this resource, we develop models to automatically detect the relation between image and text, an emotion stimulus category and the emotion class. We evaluate if these tasks require both modalities and find for the image{--}text relations, that text alone is sufficient for most categories (complementary, illustrative, opposing): the information in the text allows to predict if an image is required for emotion understanding. The emotions of anger and sadness are best predicted with a multimodal model, while text alone is sufficient for disgust, joy, and surprise. Stimuli depicted by objects, animals, food, or a person are best predicted by image-only models, while multimodal mod- els are most effective on art, events, memes, places, or screenshots.",
}