@inproceedings{bhatt-shrivastava-2022-tesla,
title = "Tesla at {S}em{E}val-2022 Task 4: Patronizing and Condescending Language Detection using Transformer-based Models with Data Augmentation",
author = "Bhatt, Sahil and
Shrivastava, Manish",
editor = "Emerson, Guy and
Schluter, Natalie and
Stanovsky, Gabriel and
Kumar, Ritesh and
Palmer, Alexis and
Schneider, Nathan and
Singh, Siddharth and
Ratan, Shyam",
booktitle = "Proceedings of the 16th International Workshop on Semantic Evaluation (SemEval-2022)",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.semeval-1.52/",
doi = "10.18653/v1/2022.semeval-1.52",
pages = "394--399",
abstract = "This paper describes our system for Task 4 of SemEval 2022: Patronizing and Condescending Language (PCL) Detection. For sub-task 1, where the objective is to classify a text as PCL or non-PCL, we use a T5 Model fine-tuned on the dataset. For sub-task 2, which is a multi-label classification problem, we use a RoBERTa model fine-tuned on the dataset. Given that the key challenge in this task is classification on an imbalanced dataset, our models rely on an augmented dataset that we generate using paraphrasing. We found that these two models yield the best results out of all the other approaches we tried."
}
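
The abstract outlines the system at a high level: a fine-tuned T5 for binary PCL detection (sub-task 1), a fine-tuned RoBERTa for multi-label PCL category classification (sub-task 2), and paraphrase-based data augmentation to counter class imbalance. As a rough illustration of the sub-task 2 setup only, the sketch below fine-tunes RoBERTa in multi-label mode with Hugging Face Transformers; it is not the authors' code, and the label count, hyperparameters, and placeholder data are assumptions.

```python
# Minimal sketch (not the authors' code): multi-label fine-tuning of RoBERTa,
# loosely mirroring the sub-task 2 setup described in the abstract.
# Label count, hyperparameters, and data loading here are illustrative assumptions.
import torch
from torch.utils.data import Dataset
from transformers import (
    RobertaTokenizerFast,
    RobertaForSequenceClassification,
    Trainer,
    TrainingArguments,
)

NUM_PCL_CATEGORIES = 7  # assumption: one output per PCL category in sub-task 2


class PCLDataset(Dataset):
    """Wraps (text, multi-hot label vector) pairs for the Trainer."""

    def __init__(self, texts, labels, tokenizer, max_length=256):
        self.enc = tokenizer(
            texts, truncation=True, padding="max_length", max_length=max_length
        )
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        item = {k: torch.tensor(v[idx]) for k, v in self.enc.items()}
        # Float labels are required for the BCE-with-logits loss used in
        # multi-label mode.
        item["labels"] = torch.tensor(self.labels[idx], dtype=torch.float)
        return item


tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
model = RobertaForSequenceClassification.from_pretrained(
    "roberta-base",
    num_labels=NUM_PCL_CATEGORIES,
    problem_type="multi_label_classification",  # switches the loss to BCEWithLogitsLoss
)

# Placeholder data: in practice this would be the (paraphrase-augmented) task data.
train_texts = ["example paragraph ...", "another paragraph ..."]
train_labels = [[1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1]]

trainer = Trainer(
    model=model,
    args=TrainingArguments(
        output_dir="pcl-roberta",
        num_train_epochs=3,
        per_device_train_batch_size=16,
    ),
    train_dataset=PCLDataset(train_texts, train_labels, tokenizer),
)
trainer.train()
```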