@inproceedings{roll-graham-2024-greybox,
  title     = {{GreyBox} at {SemEval}-2024 Task 4: Progressive Fine-tuning (for Multilingual Detection of Propaganda Techniques)},
  author    = {Roll, Nathan and
               Graham, Calbert},
  editor    = {Ojha, Atul Kr. and
               Do{\u{g}}ru{\"o}z, A. Seza and
               Tayyar Madabushi, Harish and
               Da San Martino, Giovanni and
               Rosenthal, Sara and
               Ros{\'a}, Aiala},
  booktitle = {Proceedings of the 18th International Workshop on Semantic Evaluation ({SemEval}-2024)},
  month     = jun,
  year      = {2024},
  address   = {Mexico City, Mexico},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.semeval-1.127/},
  doi       = {10.18653/v1/2024.semeval-1.127},
  pages     = {888--893},
  abstract  = {We introduce a novel fine-tuning approach that effectively primes transformer-based language models to detect rhetorical and psychological techniques within internet memes. Our end-to-end system retains multilingual and task-general capacities from pretraining stages while adapting to domain intricacies using an increasingly targeted set of examples{--} achieving competitive rankings across English, Bulgarian, and North Macedonian. We find that our monolingual post-training regimen is sufficient to improve task performance in 17 language varieties beyond equivalent zero-shot capabilities despite English-only data. To promote further research, we release our code publicly on GitHub.},
}
@comment{Markdown (Informal) citation, from the ACL Anthology page:
[GreyBox at SemEval-2024 Task 4: Progressive Fine-tuning (for Multilingual Detection of Propaganda Techniques)](https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.semeval-1.127/) (Roll & Graham, SemEval 2024)
ACL}