@inproceedings{tran-matsui-2024-team,
title = "Team {ISM} at {CLP}sych 2024: Extracting Evidence of Suicide Risk from {R}eddit Posts with Knowledge Self-Generation and Output Refinement using A Large Language Model",
author = "Tran, Vu and
Matsui, Tomoko",
editor = "Yates, Andrew and
Desmet, Bart and
Prud{'}hommeaux, Emily and
Zirikly, Ayah and
Bedrick, Steven and
MacAvaney, Sean and
Bar, Kfir and
Ireland, Molly and
Ophir, Yaakov",
booktitle = "Proceedings of the 9th Workshop on Computational Linguistics and Clinical Psychology (CLPsych 2024)",
month = mar,
year = "2024",
address = "St. Julians, Malta",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.clpsych-1.16/",
pages = "191--196",
abstract = "This paper presents our approach to the CLPsych 2024 shared task: utilizing large language models (LLMs) for finding supporting evidence about an individual`s suicide risk level in Reddit posts. Our framework is constructed around an LLM with knowledge self-generation and output refinement. The knowledge self-generation process produces task-related knowledge which is generated by the LLM and leads to accurate risk predictions. The output refinement process, later, with the selected best set of LLM-generated knowledge, refines the outputs by prompting the LLM repeatedly with different knowledge instances interchangeably. We achieved highly competitive results comparing to the top-performance participants with our official recall of 93.5{\%}, recall{--}precision harmonic-mean of 92.3{\%}, and mean consistency of 96.1{\%}."
}
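
The abstract outlines a two-stage prompting pipeline: knowledge self-generation followed by output refinement via repeated prompting with different knowledge instances. Below is a minimal sketch of that idea in Python, assuming a generic text-in/text-out `llm` callable; the prompt wording, function names, and loop details are illustrative assumptions, not the authors' actual implementation.

```python
# Hedged sketch of the two-stage pipeline described in the abstract.
# `llm` is any text-in/text-out completion function; all prompts and
# names here are assumptions, not the authors' code.
from typing import Callable, List

def self_generate_knowledge(llm: Callable[[str], str], n: int = 5) -> List[str]:
    """Stage 1: have the LLM generate task-related knowledge statements."""
    prompt = ("State one guideline for spotting evidence of suicide risk "
              "in a social media post.")
    return [llm(prompt) for _ in range(n)]

def refine_output(llm: Callable[[str], str], post: str,
                  knowledge: List[str]) -> str:
    """Stage 2: prompt repeatedly, swapping in different knowledge
    instances so each pass refines the previous extraction."""
    evidence = "none yet"
    for k in knowledge:
        evidence = llm(
            f"Guideline: {k}\n"
            f"Post: {post}\n"
            f"Current evidence: {evidence}\n"
            "Extract the sentences that best support the risk assessment."
        )
    return evidence
```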