@inproceedings{gupta-sharma-2021-nlpiitr,
  title     = {{NLPIITR} at {SemEval}-2021 Task 6: {RoBERTa} Model with Data Augmentation for Persuasion Techniques Detection},
  author    = {Gupta, Vansh and
               Sharma, Raksha},
  editor    = {Palmer, Alexis and
               Schneider, Nathan and
               Schluter, Natalie and
               Emerson, Guy and
               Herbelot, Aurelie and
               Zhu, Xiaodan},
  booktitle = {Proceedings of the 15th International Workshop on Semantic Evaluation ({SemEval}-2021)},
  month     = aug,
  year      = {2021},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.semeval-1.147/},
  doi       = {10.18653/v1/2021.semeval-1.147},
  pages     = {1061--1067},
  abstract  = {This paper describes and examines different systems to address Task 6 of SemEval-2021: Detection of Persuasion Techniques In Texts And Images, Subtask 1. The task aims to build a model for identifying rhetorical and psychological techniques (such as causal oversimplification, name-calling, smear) in the textual content of a meme which is often used in a disinformation campaign to influence the users. The paper provides an extensive comparison among various machine learning systems as a solution to the task. We elaborate on the pre-processing of the text data in favor of the task and present ways to overcome the class imbalance. The results show that fine-tuning a RoBERTa model gave the best results with an F1-Micro score of 0.51 on the development set.},
}
@comment{Markdown (Informal):
[NLPIITR at SemEval-2021 Task 6: RoBERTa Model with Data Augmentation for Persuasion Techniques Detection](https://aclanthology.org/2021.semeval-1.147/) (Gupta & Sharma, SemEval 2021)
ACL
}