@inproceedings{li-scarton-2020-revisiting,
  title     = {Revisiting Rumour Stance Classification: Dealing with Imbalanced Data},
  author    = {Li, Yue and
               Scarton, Carolina},
  booktitle = {Proceedings of the 3rd International Workshop on Rumours and Deception in Social Media ({RDSM})},
  month     = dec,
  year      = {2020},
  address   = {Barcelona, Spain (Online)},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2020.rdsm-1.4},
  pages     = {38--44},
  abstract  = {Correctly classifying stances of replies can be significantly helpful for the automatic detection and classification of online rumours. One major challenge is that there are considerably more non-relevant replies (comments) than informative ones (supports and denies), making the task highly imbalanced. In this paper we revisit the task of rumour stance classification, aiming to improve the performance over the informative minority classes. We experiment with traditional methods for imbalanced data treatment with feature- and BERT-based classifiers. Our models outperform all systems in RumourEval 2017 shared task and rank second in RumourEval 2019.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="li-scarton-2020-revisiting">
<titleInfo>
<title>Revisiting Rumour Stance Classification: Dealing with Imbalanced Data</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yue</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolina</namePart>
<namePart type="family">Scarton</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd International Workshop on Rumours and Deception in Social Media (RDSM)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Barcelona, Spain (Online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Correctly classifying stances of replies can be significantly helpful for the automatic detection and classification of online rumours. One major challenge is that there are considerably more non-relevant replies (comments) than informative ones (supports and denies), making the task highly imbalanced. In this paper we revisit the task of rumour stance classification, aiming to improve the performance over the informative minority classes. We experiment with traditional methods for imbalanced data treatment with feature- and BERT-based classifiers. Our models outperform all systems in RumourEval 2017 shared task and rank second in RumourEval 2019.</abstract>
<identifier type="citekey">li-scarton-2020-revisiting</identifier>
<location>
<url>https://aclanthology.org/2020.rdsm-1.4</url>
</location>
<part>
<date>2020-12</date>
<extent unit="page">
<start>38</start>
<end>44</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Revisiting Rumour Stance Classification: Dealing with Imbalanced Data
%A Li, Yue
%A Scarton, Carolina
%S Proceedings of the 3rd International Workshop on Rumours and Deception in Social Media (RDSM)
%D 2020
%8 dec
%I Association for Computational Linguistics
%C Barcelona, Spain (Online)
%F li-scarton-2020-revisiting
%X Correctly classifying stances of replies can be significantly helpful for the automatic detection and classification of online rumours. One major challenge is that there are considerably more non-relevant replies (comments) than informative ones (supports and denies), making the task highly imbalanced. In this paper we revisit the task of rumour stance classification, aiming to improve the performance over the informative minority classes. We experiment with traditional methods for imbalanced data treatment with feature- and BERT-based classifiers. Our models outperform all systems in RumourEval 2017 shared task and rank second in RumourEval 2019.
%U https://aclanthology.org/2020.rdsm-1.4
%P 38-44
Markdown (Informal)
[Revisiting Rumour Stance Classification: Dealing with Imbalanced Data](https://aclanthology.org/2020.rdsm-1.4) (Li & Scarton, RDSM 2020)
ACL