@inproceedings{dong-etal-2020-transformer,
  title     = {Transformer-based Context-aware Sarcasm Detection in Conversation Threads from Social Media},
  author    = {Dong, Xiangjue and
               Li, Changmao and
               Choi, Jinho D.},
  editor    = {Klebanov, Beata Beigman and
               Shutova, Ekaterina and
               Lichtenstein, Patricia and
               Muresan, Smaranda and
               Wee, Chee and
               Feldman, Anna and
               Ghosh, Debanjan},
  booktitle = {Proceedings of the Second Workshop on Figurative Language Processing},
  month     = jul,
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2020.figlang-1.38/},
  doi       = {10.18653/v1/2020.figlang-1.38},
  pages     = {276--280},
  abstract  = {We present a transformer-based sarcasm detection model that accounts for the context from the entire conversation thread for more robust predictions. Our model uses deep transformer layers to perform multi-head attentions among the target utterance and the relevant context in the thread. The context-aware models are evaluated on two datasets from social media, Twitter and Reddit, and show 3.1{\%} and 7.0{\%} improvements over their baselines. Our best models give the F1-scores of 79.0{\%} and 75.0{\%} for the Twitter and Reddit datasets respectively, becoming one of the highest performing systems among 36 participants in this shared task.},
}
Markdown (Informal)
[Transformer-based Context-aware Sarcasm Detection in Conversation Threads from Social Media](https://aclanthology.org/2020.figlang-1.38/) (Dong et al., Fig-Lang 2020)
ACL