@inproceedings{bauer-etal-2024-offensiveness,
title = "Offensiveness, Hate, Emotion and {GPT}: Benchmarking {GPT}3.5 and {GPT}4 as Classifiers on {T}witter-specific Datasets",
author = "Bauer, Nikolaj and
Preisig, Moritz and
Volk, Martin",
editor = "Kumar, Ritesh and
Ojha, Atul Kr. and
Malmasi, Shervin and
Chakravarthi, Bharathi Raja and
Lahiri, Bornini and
Singh, Siddharth and
Ratan, Shyam",
booktitle = "Proceedings of the Fourth Workshop on Threat, Aggression {\&} Cyberbullying @ LREC-COLING-2024",
month = may,
year = "2024",
address = "Torino, Italia",
publisher = "ELRA and ICCL",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.trac-1.14/",
pages = "126--133",
abstract = "In this paper, we extend the work of benchmarking GPT by turning GPT models into classifiers and applying them on three different Twitter datasets on Hate-Speech Detection, Offensive Language Detection, and Emotion Classification. We use a Zero-Shot and Few-Shot approach to evaluate the classification capabilities of the GPT models. Our results show that GPT models do not always beat fine-tuned models on the tested benchmarks. However, in Hate-Speech and Emotion Detection, using a Few-Shot approach, state-of-the-art performance can be achieved. The results also reveal that GPT-4 is more sensitive to the examples given in a Few-Shot prompt, highlighting the importance of choosing fitting examples for inference and prompt formulation."
}