@inproceedings{k-b-2025-ssncse-lt,
    title = "{SSNCSE}@{LT}-{EDI}-2025: Detecting Misogyny Memes using Pretrained Deep Learning models",
author = "K, Sreeja and
B, Bharathi",
editor = "Gkirtzou, Katerina and
{\v{Z}}itnik, Slavko and
Gracia, Jorge and
Gromann, Dagmar and
di Buono, Maria Pia and
Monti, Johanna and
Ionov, Maxim",
booktitle = "Proceedings of the 5th Conference on Language, Data and Knowledge: Fifth Workshop on Language Technology for Equality, Diversity, Inclusion",
month = sep,
year = "2025",
address = "Naples, Italy",
publisher = "Unior Press",
url = "https://preview.aclanthology.org/corrections-2025-10/2025.ltedi-1.1/",
pages = "1--5",
ISBN = "978-88-6719-334-9",
  abstract = "Misogyny meme detection is the task of identifying memes that are harmful or offensive to women. Such memes can hide hate behind jokes or images, which makes them difficult to identify, and detecting them is important for a safer and more respectful internet. We propose a multimodal method for misogyny meme detection in Chinese social media that combines the textual and visual aspects of memes. The training and evaluation data were provided as part of a shared task on detecting misogynistic content. We used a pretrained ResNet-50 architecture to extract visual representations of the memes and processed the meme transcriptions with BERT. The model fused the modality-specific representations with a feed-forward neural network for classification. The pretrained encoders were frozen to avoid overfitting and to improve generalization across classes, and only the final classifier was fine-tuned on the labelled meme collection. The model achieved a macro F1-score of 0.70345 on the test data. These results show that a lightweight multimodal fusion approach can be effective for hostile meme detection on noisy social media."
}
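The abstract describes a late-fusion architecture: a frozen ResNet-50 visual encoder and a frozen BERT text encoder, with only a feed-forward classifier trained on the labelled memes. Below is a minimal sketch of such a model, not the authors' released code; the checkpoint names (ResNet50_Weights.DEFAULT, "bert-base-chinese"), hidden sizes, dropout rate, and number of classes are illustrative assumptions.

```python
# Sketch of a frozen ResNet-50 + frozen BERT late-fusion classifier,
# assuming PyTorch, torchvision, and Hugging Face transformers.
import torch
import torch.nn as nn
from torchvision.models import resnet50, ResNet50_Weights
from transformers import BertModel


class MisogynyMemeClassifier(nn.Module):
    def __init__(self, num_classes: int = 2, hidden_dim: int = 256):
        super().__init__()
        # Frozen ResNet-50 backbone; drop the final classification layer
        # so the output is a 2048-d pooled visual feature.
        backbone = resnet50(weights=ResNet50_Weights.DEFAULT)
        self.visual_encoder = nn.Sequential(*list(backbone.children())[:-1])
        for p in self.visual_encoder.parameters():
            p.requires_grad = False

        # Frozen BERT encoder for the meme transcription
        # ("bert-base-chinese" is an assumed choice for Chinese text).
        self.text_encoder = BertModel.from_pretrained("bert-base-chinese")
        for p in self.text_encoder.parameters():
            p.requires_grad = False

        # Only this feed-forward head is fine-tuned on the labelled memes.
        fused_dim = 2048 + self.text_encoder.config.hidden_size
        self.classifier = nn.Sequential(
            nn.Linear(fused_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, num_classes),
        )

    def forward(self, image: torch.Tensor, input_ids: torch.Tensor,
                attention_mask: torch.Tensor) -> torch.Tensor:
        img_feat = self.visual_encoder(image).flatten(1)           # (B, 2048)
        txt_feat = self.text_encoder(
            input_ids=input_ids, attention_mask=attention_mask
        ).pooler_output                                            # (B, 768)
        fused = torch.cat([img_feat, txt_feat], dim=1)             # late fusion
        return self.classifier(fused)
```

Because both encoders are frozen, only the small classifier head receives gradients, which keeps training lightweight and reduces overfitting on the limited labelled meme collection, consistent with the setup described in the abstract.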