@inproceedings{loginova-2025-fine,
title = "{F}ine{-}{T}uned Transformers for Detection and Classification of Persuasion Techniques in {S}lavic Languages",
author = "Loginova, Ekaterina",
editor = "Piskorski, Jakub and
P{\v{r}}ib{\'a}{\v{n}}, Pavel and
Nakov, Preslav and
Yangarber, Roman and
Marci{\'n}czuk, Micha{\l}",
booktitle = "Proceedings of the 10th Workshop on Slavic Natural Language Processing (Slavic NLP 2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.bsnlp-1.17/",
pages = "151--156",
ISBN = "978-1-959429-57-9",
abstract = "This paper details a system developed for the SlavicNLP 2025 Shared Task on the Detection and Classification of Persuasion Techniques in Texts for Slavic Languages (Bulgarian, Croatian, Polish, Russian and Slovene). The shared task comprises two subtasks: binary detection of persuasive content within text fragments and multi-class, multi-label identification of specific persuasion techniques at the token level. Our primary approach for both subtasks involved fine-tuning pre-trained multilingual Transformer models. For Subtask 1 (paragraph{-}level binary detection), we fine{-}tuned a multilingual Transformer sequence classifier, with training augmented by additional labelled data. For Subtask 2 (token{-}level multi{-}label classification), we re{-}cast the problem as named{-}entity recognition. The resulting systems reached an F1 score of 0.92 in paragraph{-}level detection (ranked third on average). We present our system architecture, data handling, training procedures, and official results, alongside areas for future improvement."
}