@inproceedings{akrah-2021-duluthnlp,
title = "{D}uluth{NLP} at {S}em{E}val-2021 Task 7: Fine-Tuning {R}o{BERT}a Model for Humor Detection and Offense Rating",
author = "Akrah, Samuel",
editor = "Palmer, Alexis and
Schneider, Nathan and
Schluter, Natalie and
Emerson, Guy and
Herbelot, Aurelie and
Zhu, Xiaodan",
booktitle = "Proceedings of the 15th International Workshop on Semantic Evaluation (SemEval-2021)",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2021.semeval-1.169/",
doi = "10.18653/v1/2021.semeval-1.169",
pages = "1196--1203",
abstract = "This paper presents the DuluthNLP submission to Task 7 of the SemEval 2021 competition on Detecting and Rating Humor and Offense. In it, we explain the approach used to train the model together with the process of fine-tuning our model in getting the results. We focus on humor detection, rating, and of-fense rating, representing three out of the four subtasks that were provided. We show that optimizing hyper-parameters for learning rate, batch size and number of epochs can increase the accuracy and F1 score for humor detection"
}
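
As an illustration of the approach the abstract describes, the sketch below fine-tunes a pretrained RoBERTa classifier for binary humor detection. This is a minimal sketch assuming the Hugging Face transformers and datasets libraries; the paper does not state its implementation, and the model checkpoint ("roberta-base"), the toy examples, the output directory, and the hyperparameter values (learning rate, batch size, epochs) are illustrative assumptions standing in for the values the authors tuned.

    # Minimal fine-tuning sketch (assumed stack: transformers + datasets).
    from datasets import Dataset
    from transformers import (
        RobertaForSequenceClassification,
        RobertaTokenizerFast,
        Trainer,
        TrainingArguments,
    )

    # Load a pretrained RoBERTa encoder with a 2-way classification head
    # (humorous vs. not humorous).
    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
    model = RobertaForSequenceClassification.from_pretrained(
        "roberta-base", num_labels=2
    )

    # Toy stand-in for the training data: text paired with a binary humor
    # label (the real data comes from the SemEval-2021 Task 7 corpus).
    train_data = Dataset.from_dict({
        "text": ["Why did the chicken cross the road?", "The meeting moved to 3pm."],
        "label": [1, 0],
    })

    def tokenize(batch):
        # Convert raw text into fixed-length input IDs and attention masks.
        return tokenizer(
            batch["text"], truncation=True, padding="max_length", max_length=128
        )

    train_data = train_data.map(tokenize, batched=True)

    # The three hyperparameters the abstract reports tuning; these exact
    # values are placeholders, not the authors' settings.
    args = TrainingArguments(
        output_dir="humor-roberta",  # hypothetical output directory
        learning_rate=2e-5,
        per_device_train_batch_size=16,
        num_train_epochs=3,
    )

    Trainer(model=model, args=args, train_dataset=train_data).train()

In practice one would sweep the three hyperparameters the abstract highlights against validation accuracy and F1 rather than fixing them as above.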