@inproceedings{du-etal-2022-pali,
title = "{PALI}-{NLP} at {S}em{E}val-2022 Task 6: i{S}arcasm{E}val- Fine-tuning the Pre-trained Model for Detecting Intended Sarcasm",
author = "Du, Xiyang and
Hu, Dou and
Zhi, Jin and
Jiang, Lianxin and
Shi, Xiaofeng",
editor = "Emerson, Guy and
Schluter, Natalie and
Stanovsky, Gabriel and
Kumar, Ritesh and
Palmer, Alexis and
Schneider, Nathan and
Singh, Siddharth and
Ratan, Shyam",
booktitle = "Proceedings of the 16th International Workshop on Semantic Evaluation (SemEval-2022)",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.semeval-1.112/",
doi = "10.18653/v1/2022.semeval-1.112",
pages = "815--819",
    abstract = "This paper describes the method we utilized in SemEval-2022 Task 6, iSarcasmEval: Intended Sarcasm Detection in English and Arabic. Our system achieved 1st place in Subtask B, which is to identify the categories of intended sarcasm. The proposed system integrates multiple BERT-based, RoBERTa-based and BERTweet-based models with fine-tuning. In this task, we contributed the following: 1) We reveal the performance of several large pre-trained models on tasks involving tweet-like text. 2) Our methods show that, with a proper training method, we can still achieve excellent results in this particular task without a complex classifier. 3) We found a hierarchical relationship among the sarcasm types in this task."
}