@inproceedings{chheda-etal-2025-sarcastic,
title = "Could you {BE} more sarcastic? A Cognitive Approach to Bidirectional Sarcasm Understanding in Language Models",
author = "Chheda, Veer and
Sankhe, Avantika and
Sankhe, Atharva Vinay",
editor = "T.y.s.s, Santosh and
Shimizu, Shuichiro and
Gong, Yifan",
booktitle = "The 14th International Joint Conference on Natural Language Processing and The 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-srw.22/",
pages = "259--276",
ISBN = "979-8-89176-304-3",
abstract = "Sarcasm is a specific form of ironic speech which can often be hard to understand for language models due to its nuanced nature. Recent improvements in the ability of such models to detect and generate sarcasm motivate us to try a new approach to help language models perceive sarcasm as a speech style, through a human cognitive perspective. In this work, we propose a multi-hop Chain of Thought (CoT) methodology to understand the context of an utterance that follows a dialogue and to perform bidirectional style transfer on that utterance, leveraging the Theory of Mind. We use small language models (SLMs) due to their cost-efficiency and fast response-time. The generated utterances are evaluated using both LLM-as-a-judge and human evaluation, suitable to the open-ended and stylistic nature of the generations. Along with these, we also evaluate scores of automated metrics such as DialogRPT, BLEU and SBERT; drawing valuable insights from them that support our evidence. Based on this, we find that our cognitive approach to sarcasm is an effective way for language models to stylistically understand and generate sarcasm with better authenticity."
}

Markdown (Informal)
[Could you BE more sarcastic? A Cognitive Approach to Bidirectional Sarcasm Understanding in Language Models](https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-srw.22/) (Chheda et al., IJCNLP 2025)