@inproceedings{poswiata-perelkiewicz-2022-opi,
title = "{OPI}@{LT}-{EDI}-{ACL}2022: Detecting Signs of Depression from Social Media Text using {R}o{BERT}a Pre-trained Language Models",
author = "Po{\'s}wiata, Rafa{\l} and
Pere{\l}kiewicz, Micha{\l}",
editor = "Chakravarthi, Bharathi Raja and
Bharathi, B and
McCrae, John P and
Zarrouk, Manel and
Bali, Kalika and
Buitelaar, Paul",
booktitle = "Proceedings of the Second Workshop on Language Technology for Equality, Diversity and Inclusion",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.ltedi-1.40/",
doi = "10.18653/v1/2022.ltedi-1.40",
pages = "276--282",
abstract = "This paper presents our winning solution for the Shared Task on Detecting Signs of Depression from Social Media Text at LT-EDI-ACL2022. The task was to create a system that, given social media posts in English, should detect the level of depression as {\textquoteleft}not depressed', {\textquoteleft}moderately depressed' or {\textquoteleft}severely depressed'. We based our solution on transformer-based language models. We fine-tuned selected models: BERT, RoBERTa, XLNet, of which the best results were obtained for RoBERTa. Then, using the prepared corpus, we trained our own language model called DepRoBERTa (RoBERTa for Depression Detection). Fine-tuning of this model improved the results. The third solution was to use the ensemble averaging, which turned out to be the best solution. It achieved a macro-averaged F1-score of 0.583. The source code of prepared solution is available at \url{https://github.com/rafalposwiata/depression-detection-lt-edi-2022}."
}
Markdown (Informal)
[OPI@LT-EDI-ACL2022: Detecting Signs of Depression from Social Media Text using RoBERTa Pre-trained Language Models](https://aclanthology.org/2022.ltedi-1.40/) (Poświata & Perełkiewicz, LTEDI 2022)
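
The ensemble-averaging step described in the abstract is simple enough to sketch. Below is a minimal, hedged Python sketch using HuggingFace `transformers`: it averages softmax probabilities from a list of fine-tuned classifiers and takes the argmax over the three depression levels. The checkpoint list and label order are illustrative assumptions, not the authors' exact configuration; their actual pipeline is in the linked repository.

```python
# Minimal sketch of ensemble averaging over fine-tuned transformer classifiers.
# Checkpoint names and label order below are assumptions for illustration.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

LABELS = ["not depressed", "moderately depressed", "severely depressed"]

# Assumed ensemble members; the authors released a DepRoBERTa-based classifier
# on the Hugging Face Hub, and further members would be other fine-tuned models.
MODEL_NAMES = [
    "rafalposwiata/deproberta-large-depression",
    # "path/to/finetuned-roberta-large",  # hypothetical additional member
]

def predict_proba(model_name: str, texts: list[str]) -> torch.Tensor:
    """Return softmax class probabilities of shape (batch, 3) for one model."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    model.eval()
    enc = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        logits = model(**enc).logits
    return logits.softmax(dim=-1)

def ensemble_predict(texts: list[str]) -> list[str]:
    # Average class probabilities across ensemble members, then take argmax.
    probs = torch.stack([predict_proba(m, texts) for m in MODEL_NAMES]).mean(dim=0)
    return [LABELS[i] for i in probs.argmax(dim=-1).tolist()]

print(ensemble_predict(["I can't sleep and nothing feels worth doing anymore."]))
```

Averaging probabilities rather than hard votes lets confident members outweigh uncertain ones, which matches the abstract's report that the ensemble beat each individual fine-tuned model.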