@inproceedings{dao-etal-2024-puer,
  title     = {Puer at {SemEval}-2024 Task 4: Fine-tuning Pre-trained Language Models for Meme Persuasion Technique Detection},
  author    = {Dao, Jiaxu and
               Li, Zhuoying and
               Su, Youbang and
               Gong, Wensheng},
  editor    = {Ojha, Atul Kr. and
               Do{\u{g}}ru{\"o}z, A. Seza and
               Tayyar Madabushi, Harish and
               Da San Martino, Giovanni and
               Rosenthal, Sara and
               Ros{\'a}, Aiala},
  booktitle = {Proceedings of the 18th International Workshop on Semantic Evaluation ({SemEval}-2024)},
  month     = jun,
  year      = {2024},
  address   = {Mexico City, Mexico},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/fix-sig-urls/2024.semeval-1.11/},
  doi       = {10.18653/v1/2024.semeval-1.11},
  pages     = {64--69},
  abstract  = {The paper summarizes our research on multilingual detection of persuasion techniques in memes for the SemEval-2024 Task 4. Our work focused on English-Subtask 1, implemented based on a roberta-large pre-trained model provided by the transforms tool that was fine-tuned into a corpus of social media posts. Our method significantly outperforms the officially released baseline method, and ranked 7th in English-Subtask 1 for the test set. This paper also compares the performances of different deep learning model architectures, such as BERT, ALBERT, and XLM-RoBERTa, on multilingual detection of persuasion techniques in memes. The experimental source code covered in the paper will later be sourced from Github.},
}
@comment{
Markdown (Informal)
[Puer at SemEval-2024 Task 4: Fine-tuning Pre-trained Language Models for Meme Persuasion Technique Detection](https://preview.aclanthology.org/fix-sig-urls/2024.semeval-1.11/) (Dao et al., SemEval 2024)
ACL
}