@inproceedings{yang-etal-2020-imec,
    title     = {imec-{ETRO}-{VUB} at {W}-{NUT} 2020 Shared Task-3: A multilabel {BERT}-based system for predicting {COVID}-19 events},
    author    = {Yang, Xiangyu and
                 Bekoulis, Giannis and
                 Deligiannis, Nikos},
    editor    = {Xu, Wei and
                 Ritter, Alan and
                 Baldwin, Tim and
                 Rahimi, Afshin},
    booktitle = {Proceedings of the Sixth Workshop on Noisy User-generated Text (W-NUT 2020)},
    month     = nov,
    year      = {2020},
    address   = {Online},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2020.wnut-1.77/},
    doi       = {10.18653/v1/2020.wnut-1.77},
    pages     = {505--513},
    abstract  = {In this paper, we present our system designed to address the W-NUT 2020 shared task for COVID-19 Event Extraction from Twitter. To mitigate the noisy nature of the Twitter stream, our system makes use of the COVID-Twitter-BERT (CT-BERT), which is a language model pre-trained on a large corpus of COVID-19 related Twitter messages. Our system is trained on the COVID-19 Twitter Event Corpus and is able to identify relevant text spans that answer pre-defined questions (i.e., slot types) for five COVID-19 related events (i.e., TESTED POSITIVE, TESTED NEGATIVE, CAN-NOT-TEST, DEATH and CURE {\&} PREVENTION). We have experimented with different architectures; our best performing model relies on a multilabel classifier on top of the CT-BERT model that jointly trains all the slot types for a single event. Our experimental results indicate that our Multilabel-CT-BERT system outperforms the baseline methods by 7 percentage points in terms of micro average F1 score. Our model ranked as 4th in the shared task leaderboard.}
}
Markdown (Informal)
[imec-ETRO-VUB at W-NUT 2020 Shared Task-3: A multilabel BERT-based system for predicting COVID-19 events](https://aclanthology.org/2020.wnut-1.77/) (Yang et al., WNUT 2020)
ACL