@inproceedings{bai-etal-2022-ynu,
title = "{YNU}-{HPCC} at {S}em{E}val-2022 Task 4: Finetuning Pretrained Language Models for Patronizing and Condescending Language Detection",
author = "Bai, Wenqiang and
Wang, Jin and
Zhang, Xuejie",
editor = "Emerson, Guy and
Schluter, Natalie and
Stanovsky, Gabriel and
Kumar, Ritesh and
Palmer, Alexis and
Schneider, Nathan and
Singh, Siddharth and
Ratan, Shyam",
booktitle = "Proceedings of the 16th International Workshop on Semantic Evaluation (SemEval-2022)",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.semeval-1.61/",
doi = "10.18653/v1/2022.semeval-1.61",
pages = "454--458",
abstract = "This paper describes a system built in the SemEval-2022 competition. As participants in Task 4: Patronizing and Condescending Language Detection, we implemented the text sentiment classification system for two subtasks in English. Both subtasks involve determining emotions; subtask 1 requires us to determine whether the text belongs to the PCL category (single-label classification), and subtask 2 requires us to determine to which PCL category the text belongs (multi-label classification). Our system is based on the bidirectional encoder representations from transformers (BERT) model. For the single-label classification, our system applies a BertForSequenceClassification model to classify the input text. For the multi-label classification, we use the fine-tuned BERT model to extract the sentiment score of the text and a fully connected layer to classify the text into the PCL categories. Our system achieved relatively good results on the competition{'}s official leaderboard."
}
Markdown (Informal)
[YNU-HPCC at SemEval-2022 Task 4: Finetuning Pretrained Language Models for Patronizing and Condescending Language Detection](https://preview.aclanthology.org/fix-sig-urls/2022.semeval-1.61/) (Bai et al., SemEval 2022)
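The abstract describes the approach at a high level: a BertForSequenceClassification model for the binary PCL subtask, and a fine-tuned BERT encoder with a fully connected layer for the multi-label subtask. Below is a minimal sketch of the binary setup using the Hugging Face `transformers` library; the checkpoint name, example texts, and labels are illustrative assumptions, not the authors' code.

```python
# Minimal sketch (not the authors' implementation): fine-tuning
# BertForSequenceClassification for binary PCL detection (Subtask 1).
# Checkpoint, texts, and labels below are hypothetical placeholders.
import torch
from transformers import BertTokenizer, BertForSequenceClassification

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForSequenceClassification.from_pretrained(
    "bert-base-uncased", num_labels=2  # PCL vs. non-PCL
)

texts = [
    "They need our help to get back on their feet.",
    "The report was published on Monday.",
]
labels = torch.tensor([1, 0])  # hypothetical gold labels: 1 = PCL, 0 = not PCL

batch = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
outputs = model(**batch, labels=labels)

outputs.loss.backward()                 # cross-entropy loss for one fine-tuning step
preds = outputs.logits.argmax(dim=-1)   # predicted classes
```

For the multi-label subtask, the paper instead feeds the fine-tuned BERT representations to a fully connected layer that scores each PCL category; the sketch above covers only the single-label case.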