@inproceedings{xu-2022-xu,
title = "Xu at {S}em{E}val-2022 Task 4: Pre-{BERT} Neural Network Methods vs Post-{BERT} {R}o{BERT}a Approach for Patronizing and Condescending Language Detection",
author = "Xu, Jinghua",
editor = "Emerson, Guy and
Schluter, Natalie and
Stanovsky, Gabriel and
Kumar, Ritesh and
Palmer, Alexis and
Schneider, Nathan and
Singh, Siddharth and
Ratan, Shyam",
booktitle = "Proceedings of the 16th International Workshop on Semantic Evaluation (SemEval-2022)",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2022.semeval-1.65/",
doi = "10.18653/v1/2022.semeval-1.65",
pages = "479--484",
abstract = "This paper describes my participation in the SemEval-2022 Task 4: Patronizing and Condescending Language Detection. I participate in both subtasks: Patronizing and Condescending Language (PCL) Identification and Patronizing and Condescending Language Categorization, with the main focus put on subtask 1. The experiments compare pre-BERT neural network (NN) based systems against post-BERT pretrained language model RoBERTa. This research finds NN-based systems in the experiments perform worse on the task compared to the pretrained language models. The top-performing RoBERTa system is ranked 26 out of 78 teams (F1-score: 54.64) in subtask 1, and 23 out of 49 teams (F1-score: 30.03) in subtask 2."
}