@inproceedings{soltani-romberg-2023-general,
  title     = {A General Framework for Multimodal Argument Persuasiveness Classification of Tweets},
  author    = {Soltani, Mohammad and
               Romberg, Julia},
  editor    = {Alshomary, Milad and
               Chen, Chung-Chi and
               Muresan, Smaranda and
               Park, Joonsuk and
               Romberg, Julia},
  booktitle = {Proceedings of the 10th Workshop on Argument Mining},
  month     = dec,
  year      = {2023},
  address   = {Singapore},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.argmining-1.15/},
  doi       = {10.18653/v1/2023.argmining-1.15},
  pages     = {148--156},
  abstract  = {An important property of argumentation concerns the degree of its persuasiveness, which can be influenced by various modalities. On social media platforms, individuals usually have the option of supporting their textual statements with images. The goals of the ImageArg shared task, held with ArgMining 2023, were therefore (A) to classify tweet stances considering both modalities and (B) to predict the influence of an image on the persuasiveness of a tweet text. In this paper, we present our proposed methodology that shows strong performance on both tasks, placing 3rd team on the leaderboard in each case with F1 scores of 0.8273 (A) and 0.5281 (B). The framework relies on pre-trained models to extract text and image features, which are then fed into a task-specific classification model. Our experiments highlighted that the multimodal vision and language model CLIP holds a specific importance in the extraction of features, in particular for task (A).},
}
Markdown (Informal)
[A General Framework for Multimodal Argument Persuasiveness Classification of Tweets](https://aclanthology.org/2023.argmining-1.15/) (Soltani & Romberg, ArgMining 2023)
ACL