@inproceedings{prakash-babu-eswari-2020-cia,
    title = "{CIA}{\_}{NITT} at {WNUT}-2020 Task 2: Classification of {COVID}-19 Tweets Using Pre-trained Language Models",
    author = "Prakash Babu, Yandrapati  and
      Eswari, Rajagopal",
    editor = "Xu, Wei  and
      Ritter, Alan  and
      Baldwin, Tim  and
      Rahimi, Afshin",
    booktitle = "Proceedings of the Sixth Workshop on Noisy User-generated Text (W-NUT 2020)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/ingest-emnlp/2020.wnut-1.70/",
    doi = "10.18653/v1/2020.wnut-1.70",
    pages = "471--474",
    abstract = "This paper presents our models for WNUT2020 shared task2. The shared task2 involves identification of COVID-19 related informative tweets. We treat this as binary text clas-sification problem and experiment with pre-trained language models. Our first model which is based on CT-BERT achieves F1-scoreof 88.7{\%} and second model which is an ensemble of CT-BERT, RoBERTa and SVM achieves F1-score of 88.52{\%}."
}