@inproceedings{melnychuk-2025-comparing,
  title     = {Comparing Methods for Multi-Label Classification of Manipulation Techniques in {Ukrainian} {Telegram} Content},
  author    = {Melnychuk, Oleh},
  editor    = {Romanyshyn, Mariana},
  booktitle = {Proceedings of the Fourth Ukrainian Natural Language Processing Workshop (UNLP 2025)},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria (online)},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.unlp-1.5/},
  pages     = {45--48},
  isbn      = {979-8-89176-269-5},
  abstract  = {Detecting manipulation techniques in online text is vital for combating misinformation, a task complicated by generative AI. This paper compares machine learning approaches for multi-label classification of 10 techniques in Ukrainian Telegram content (UNLP 2025 Shared Task 1). Our evaluation included TF-IDF, fine-tuned XLM-RoBERTa-Large, PEFT-LLM (Gemma, Mistral) and a RAG approach (E5 + Mistral Nemo). The fine-tuned XLM-RoBERTa-Large model, which incorporates weighted loss to address class imbalance, yielded the highest Macro F1 score (0.4346). This result surpassed the performance of TF-IDF (Macro F1 0.32-0.36), the PEFT-LLM (0.28-0.33) and RAG (0.309). Synthetic data slightly helped TF-IDF but reduced transformer model performance. The results demonstrate the strong performance of standard transformers like XLM-R when appropriately configured for this classification task.},
}
Markdown (Informal)
[Comparing Methods for Multi-Label Classification of Manipulation Techniques in Ukrainian Telegram Content](https://aclanthology.org/2025.unlp-1.5/) (Melnychuk, UNLP 2025)
ACL