@inproceedings{rao-etal-2021-stanker,
title = "{STANKER}: Stacking Network based on Level-grained Attention-masked {BERT} for Rumor Detection on Social Media",
author = "Rao, Dongning and
Miao, Xin and
Jiang, Zhihua and
Li, Ran",
editor = "Moens, Marie-Francine and
Huang, Xuanjing and
Specia, Lucia and
Yih, Scott Wen-tau",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2021.emnlp-main.269/",
doi = "10.18653/v1/2021.emnlp-main.269",
pages = "3347--3363",
abstract = "Rumor detection on social media puts pre-trained language models (LMs), such as BERT, and auxiliary features, such as comments, into use. However, on the one hand, rumor detection datasets in Chinese companies with comments are rare; on the other hand, intensive interaction of attention on Transformer-based models like BERT may hinder performance improvement. To alleviate these problems, we build a new Chinese microblog dataset named Weibo20 by collecting posts and associated comments from Sina Weibo and propose a new ensemble named STANKER (Stacking neTwork bAsed-on atteNtion-masKed BERT). STANKER adopts two level-grained attention-masked BERT (LGAM-BERT) models as base encoders. Unlike the original BERT, our new LGAM-BERT model takes comments as important auxiliary features and masks co-attention between posts and comments on lower-layers. Experiments on Weibo20 and three existing social media datasets showed that STANKER outperformed all compared models, especially beating the old state-of-the-art on Weibo dataset."
}