@inproceedings{guo-fan-2024-nlpnchu,
title = "{NLPNCHU} at {S}em{E}val-2024 Task 4: A Comparison of {MDHC} Strategy and In-domain Pre-training for Multilingual Detection of Persuasion Techniques in Memes",
author = "Guo, Shih-wei and
Fan, Yao-chung",
editor = {Ojha, Atul Kr. and
Do{\u{g}}ru{\"o}z, A. Seza and
Tayyar Madabushi, Harish and
Da San Martino, Giovanni and
Rosenthal, Sara and
Ros{\'a}, Aiala},
booktitle = "Proceedings of the 18th International Workshop on Semantic Evaluation (SemEval-2024)",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.semeval-1.262/",
doi = "10.18653/v1/2024.semeval-1.262",
pages = "1868--1875",
    abstract = "This study presents a systematic method for identifying 22 persuasive techniques used in multilingual memes. We explored various fine-tuning techniques and classification strategies, such as data augmentation, problem transformation, and hierarchical multi-label classification. Identifying persuasive techniques in memes is a multimodal task. We fine-tuned the XLM-RoBERTA-large-twitter language model, focusing on domain-specific language modeling, and integrated it with the CLIP visual model{'}s embedding to consider image and text features simultaneously. In our experiments, we evaluated the effectiveness of our approach using the official English validation data. In the competition, our system achieved competitive rankings in Subtask 1 and Subtask 2b across four languages: English, Bulgarian, North Macedonian, and Arabic. Notably, we achieved a 2nd-place ranking for Arabic in Subtask 1."
}
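The abstract describes the system's core architecture: a Twitter-domain XLM-RoBERTa text encoder fused with a CLIP image embedding, feeding a multi-label head over the 22 persuasion techniques. As a rough illustration of that text-image fusion (a minimal sketch, not the authors' released code; the checkpoint names, feature dimensions, frozen vision tower, and linear head are all assumptions), a PyTorch version might look like:

```python
# Minimal sketch of the fusion described in the abstract: concatenate a
# fine-tuned XLM-R text representation with a CLIP image embedding and
# apply a multi-label head over the 22 techniques. Checkpoint names and
# the fusion head are illustrative assumptions, not the paper's code.
import torch
import torch.nn as nn
from transformers import AutoModel, CLIPModel

class MemeFusionClassifier(nn.Module):
    def __init__(self, num_labels: int = 22):
        super().__init__()
        # Hypothetical checkpoints: a Twitter-domain XLM-RoBERTa-large
        # text encoder and a standard CLIP vision-language model.
        self.text_encoder = AutoModel.from_pretrained(
            "cardiffnlp/twitter-xlm-roberta-large-2022"
        )
        self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        text_dim = self.text_encoder.config.hidden_size   # e.g. 1024
        image_dim = self.clip.config.projection_dim       # e.g. 512
        self.head = nn.Linear(text_dim + image_dim, num_labels)

    def forward(self, input_ids, attention_mask, pixel_values):
        # Pooled text feature: the first ([CLS]-position) token state.
        text_feat = self.text_encoder(
            input_ids=input_ids, attention_mask=attention_mask
        ).last_hidden_state[:, 0]
        # CLIP image embedding, kept frozen here (an assumption).
        with torch.no_grad():
            image_feat = self.clip.get_image_features(pixel_values=pixel_values)
        fused = torch.cat([text_feat, image_feat], dim=-1)
        return self.head(fused)  # raw logits, one per technique

# Multi-label training would use binary cross-entropy over the 22 labels:
# loss = nn.BCEWithLogitsLoss()(logits, multi_hot_targets)
```

Sigmoid-plus-BCE (rather than softmax) fits the multi-label setting, since a meme may use several persuasion techniques at once.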