@inproceedings{ruder-etal-2025-assessing,
title = "Assessing the Reliability and Validity of {GPT}-4 in Annotating Emotion Appraisal Ratings",
author = "Ruder, Deniss and
Uusberg, Andero and
Sirts, Kairit",
editor = "Zirikly, Ayah and
Yates, Andrew and
Desmet, Bart and
Ireland, Molly and
Bedrick, Steven and
MacAvaney, Sean and
Bar, Kfir and
Ophir, Yaakov",
booktitle = "Proceedings of the 10th Workshop on Computational Linguistics and Clinical Psychology (CLPsych 2025)",
month = may,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.clpsych-1.1/",
pages = "1--11",
ISBN = "979-8-89176-226-8",
abstract = "Appraisal theories suggest that emotions arise from subjective evaluations of events, referred to as appraisals. The taxonomy of appraisals is quite diverse, and they are usually given ratings on a Likert scale to be annotated in an experiencer-annotator or reader-annotator paradigm. This paper studies GPT-4 as a reader-annotator of 21 specific appraisal ratings in different prompt settings, aiming to evaluate and improve its performance compared to human annotators. We found that GPT-4 is an effective reader-annotator that performs close to or even slightly better than human annotators, and its results can be significantly improved by using a majority voting of five completions. GPT-4 also effectively predicts appraisal ratings and emotion labels using a single prompt, but adding instruction complexity results in poorer performance. We also found that longer event descriptions lead to more accurate annotations for both model and human annotator ratings. This work contributes to the growing usage of LLMs in psychology and the strategies for improving GPT-4 performance in annotating appraisals."
}