@inproceedings{zanwar-etal-2022-mantis,
  title     = {{MANTIS} at {SMM}4{H}'2022: Pre-Trained Language Models Meet a Suite of Psycholinguistic Features for the Detection of Self-Reported Chronic Stress},
  author    = {Zanwar, Sourabh and
               Wiechmann, Daniel and
               Qiao, Yu and
               Kerz, Elma},
  editor    = {Gonzalez-Hernandez, Graciela and
               Weissenbacher, Davy},
  booktitle = {Proceedings of the Seventh Workshop on Social Media Mining for Health Applications, Workshop {\&} Shared Task},
  month     = oct,
  year      = {2022},
  address   = {Gyeongju, Republic of Korea},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2022.smm4h-1.5/},
  pages     = {16--18},
  abstract  = {This paper describes our submission to Social Media Mining for Health (SMM4H) 2022 Shared Task 8, aimed at detecting self-reported chronic stress on Twitter. Our approach leverages a pre-trained transformer model (RoBERTa) in combination with a Bidirectional Long Short-Term Memory (BiLSTM) network trained on a diverse set of psycholinguistic features. We handle the class imbalance issue in the training dataset by augmenting it by another dataset used for stress classification in social media.},
}
Markdown (Informal)
[MANTIS at SMM4H’2022: Pre-Trained Language Models Meet a Suite of Psycholinguistic Features for the Detection of Self-Reported Chronic Stress](https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.smm4h-1.5/) (Zanwar et al., SMM4H 2022)
ACL