@inproceedings{romanowski-etal-2025-punchlines,
    title     = {From Punchlines to Predictions: A Metric to Assess {LLM} Performance in Identifying Humor in Stand-Up Comedy},
    author    = {Romanowski, Adrianna and
                 Valois, Pedro H. V. and
                 Fukui, Kazuhiro},
    editor    = {Kuribayashi, Tatsuki and
                 Rambelli, Giulia and
                 Takmaz, Ece and
                 Wicke, Philipp and
                 Li, Jixing and
                 Oh, Byung-Doh},
    booktitle = {Proceedings of the Workshop on Cognitive Modeling and Computational Linguistics},
    month     = may,
    year      = {2025},
    address   = {Albuquerque, New Mexico, USA},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.cmcl-1.6/},
    pages     = {36--46},
    isbn      = {979-8-89176-227-5},
    abstract  = {Comedy serves as a profound reflection of the times we live in and is a staple element of human interactions. In light of the widespread adoption of Large Language Models (LLMs), the intersection of humor and AI has become no laughing matter. Advancements in the naturalness of human-computer interaction correlates with improvements in AI systems' abilities to understand humor. In this study, we assess the ability of models in accurately identifying humorous quotes from a stand-up comedy transcript. Stand-up comedy{'}s unique comedic narratives make it an ideal dataset to improve the overall naturalness of comedic understanding. We propose a novel humor detection metric designed to evaluate LLMs amongst various prompts on their capability to extract humorous punchlines. The metric has a modular structure that offers three different scoring methods - fuzzy string matching, sentence embedding, and subspace similarity - to provide an overarching assessment of a model{'}s performance. The model{'}s results are compared against those of human evaluators on the same task. Our metric reveals that regardless of prompt engineering, leading models, ChatGPT, Claude, and DeepSeek, achieve scores of at most 51{\%} in humor detection. Notably, this performance surpasses that of humans who achieve a score of 41{\%}. The analysis of human evaluators and LLMs reveals variability in agreement, highlighting the subjectivity inherent in humor and the complexities involved in extracting humorous quotes from live performance transcripts.},
}
Markdown (Informal)
[From Punchlines to Predictions: A Metric to Assess LLM Performance in Identifying Humor in Stand-Up Comedy](https://aclanthology.org/2025.cmcl-1.6/) (Romanowski et al., CMCL 2025)
ACL