@inproceedings{sharma-etal-2021-youngsheldon-semeval,
title = "{Y}oung{S}heldon at {S}em{E}val-2021 Task 7: Fine-tuning Is All You Need",
author = "Sharma, Mayukh and
Kandasamy, Ilanthenral and
Vasantha, W. B.",
editor = "Palmer, Alexis and
Schneider, Nathan and
Schluter, Natalie and
Emerson, Guy and
Herbelot, Aurelie and
Zhu, Xiaodan",
booktitle = "Proceedings of the 15th International Workshop on Semantic Evaluation (SemEval-2021)",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.semeval-1.161/",
doi = "10.18653/v1/2021.semeval-1.161",
pages = "1146--1152",
abstract = "In this paper, we describe our system used for SemEval 2021 Task 7: HaHackathon: Detecting and Rating Humor and Offense. We used a simple fine-tuning approach using different Pre-trained Language Models (PLMs) to evaluate their performance for humor and offense detection. For regression tasks, we averaged the scores of different models, leading to better performance than the individual models. We participated in all SubTasks. Our best-performing system ranked 4th in SubTask 1-b, 8th in SubTask 1-c, and 12th in SubTask 2, and performed well in SubTask 1-a. We further show comprehensive results using different pre-trained language models, which will serve as baselines for future work."
}