@inproceedings{al-omari-etal-2021-dljust,
  title     = {{DLJUST} at {S}em{E}val-2021 Task 7: {Hahackathon}: Linking Humor and Offense},
  author    = {Al-Omari, Hani and
               AbedulNabi, Isra{'}a and
               Duwairi, Rehab},
  editor    = {Palmer, Alexis and
               Schneider, Nathan and
               Schluter, Natalie and
               Emerson, Guy and
               Herbelot, Aurelie and
               Zhu, Xiaodan},
  booktitle = {Proceedings of the 15th International Workshop on Semantic Evaluation ({S}em{E}val-2021)},
  month     = aug,
  year      = {2021},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.semeval-1.155/},
  doi       = {10.18653/v1/2021.semeval-1.155},
  pages     = {1114--1119},
  abstract  = {Humor detection and rating poses interesting linguistic challenges to NLP; it is highly subjective depending on the perceptions of a joke and the context in which it is used. This paper utilizes and compares transformers models; BERT base and Large, BERTweet, RoBERTa base and Large, and RoBERTa base irony, for detecting and rating humor and offense. The proposed models, where given a text in cased and uncased type obtained from SemEval-2021 Task7: HaHackathon: Linking Humor and Offense Across Different Age Groups. The highest scored model for the first subtask: Humor Detection, is BERTweet base cased model with 0.9540 F1-score, for the second subtask: Average Humor Rating Score, it is BERT Large cased with the minimum RMSE of 0.5555, for the fourth subtask: Average Offensiveness Rating Score, it is BERTweet base cased model with minimum RMSE of 0.4822.}
}
Markdown (Informal)
[DLJUST at SemEval-2021 Task 7: Hahackathon: Linking Humor and Offense](https://aclanthology.org/2021.semeval-1.155/) (Al-Omari et al., SemEval 2021)
ACL