@inproceedings{lin-etal-2022-ncuee,
    title     = {{NCUEE}-{NLP}@{SMM}4{H}'22: Classification of Self-reported Chronic Stress on {T}witter Using Ensemble Pre-trained Transformer Models},
    author    = {Lin, Tzu-Mi and
                 Chen, Chao-Yi and
                 Tzeng, Yu-Wen and
                 Lee, Lung-Hao},
    editor    = {Gonzalez-Hernandez, Graciela and
                 Weissenbacher, Davy},
    booktitle = {Proceedings of the Seventh Workshop on Social Media Mining for Health Applications, Workshop {\&} Shared Task},
    month     = oct,
    year      = {2022},
    address   = {Gyeongju, Republic of Korea},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2022.smm4h-1.18/},
    pages     = {62--64},
    abstract  = {This study describes our proposed system design for the SMM4H 2022 Task 8. We fine-tune the BERT, RoBERTa, ALBERT, XLNet and ELECTRA transformers and their connecting classifiers. Each transformer model is regarded as a standalone method to detect tweets that self-reported chronic stress. The final output classification result is then combined using the majority voting ensemble mechanism. Experimental results indicate that our approach achieved a best F1-score of 0.73 over the positive class.},
}
Markdown (Informal)
[NCUEE-NLP@SMM4H’22: Classification of Self-reported Chronic Stress on Twitter Using Ensemble Pre-trained Transformer Models](https://aclanthology.org/2022.smm4h-1.18/) (Lin et al., SMM4H 2022)
ACL