@inproceedings{hee-etal-2024-bridging,
  title     = {Bridging Modalities: Enhancing Cross-Modality Hate Speech Detection with Few-Shot In-Context Learning},
  author    = {Hee, Ming Shan and
               Kumaresan, Aditi and
               Lee, Roy Ka-Wei},
  editor    = {Al-Onaizan, Yaser and
               Bansal, Mohit and
               Chen, Yun-Nung},
  booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing},
  month     = nov,
  year      = {2024},
  address   = {Miami, Florida, USA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.emnlp-main.445/},
  doi       = {10.18653/v1/2024.emnlp-main.445},
  pages     = {7785--7799},
  abstract  = {The widespread presence of hate speech on the internet, including formats such as text-based tweets and multimodal memes, poses a significant challenge to digital platform safety. Recent research has developed detection models tailored to specific modalities; however, there is a notable gap in transferring detection capabilities across different formats. This study conducts extensive experiments using few-shot in-context learning with large language models to explore the transferability of hate speech detection between modalities. Our findings demonstrate that text-based hate speech examples can significantly enhance the classification accuracy of vision-language hate speech. Moreover, text-based demonstrations outperform vision-language demonstrations in few-shot learning settings. These results highlight the effectiveness of cross-modality knowledge transfer and offer valuable insights for improving hate speech detection systems.},
}
Markdown (Informal)
[Bridging Modalities: Enhancing Cross-Modality Hate Speech Detection with Few-Shot In-Context Learning](https://aclanthology.org/2024.emnlp-main.445/) (Hee et al., EMNLP 2024)
ACL