@inproceedings{ghorbanpour-etal-2025-prompting,
    title     = {Can Prompting {LLM}s Unlock Hate Speech Detection across Languages? A Zero-shot and Few-shot Study},
    author    = {Ghorbanpour, Faeze and
                 Dementieva, Daryna and
                 Fraser, Alexander},
    editor    = {Calabrese, Agostina and
                 de Kock, Christine and
                 Nozza, Debora and
                 Plaza-del-Arco, Flor Miriam and
                 Talat, Zeerak and
                 Vargas, Francielle},
    booktitle = {Proceedings of the 9th Workshop on Online Abuse and Harms ({WOAH})},
    month     = aug,
    year      = {2025},
    address   = {Vienna, Austria},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.woah-1.39/},
    pages     = {413--425},
    isbn      = {979-8-89176-105-6},
    abstract  = {Despite growing interest in automated hate speech detection, most existing approaches overlook the linguistic diversity of online content. Multilingual instruction-tuned large language models such as LLaMA, Aya, Qwen, and BloomZ offer promising capabilities across languages, but their effectiveness in identifying hate speech through zero-shot and few-shot prompting remains underexplored. This work evaluates LLM prompting-based detection across eight non-English languages, utilizing several prompting techniques and comparing them to fine-tuned encoder models. We show that while zero-shot and few-shot prompting lag behind fine-tuned encoder models on most of the real-world evaluation sets, they achieve better generalization on functional tests for hate speech detection. Our study also reveals that prompt design plays a critical role, with each language often requiring customized prompting techniques to maximize performance.},
}
Markdown (Informal)
[Can Prompting LLMs Unlock Hate Speech Detection across Languages? A Zero-shot and Few-shot Study](https://aclanthology.org/2025.woah-1.39/) (Ghorbanpour et al., WOAH 2025)
ACL