@inproceedings{tang-etal-2024-ignitioninnovators,
title = "{I}gnition{I}nnovators at ``Discharge Me!'': Chain-of-Thought Instruction Finetuning Large Language Models for Discharge Summaries",
author = "Tang, An Quang and
Zhang, Xiuzhen and
Dinh, Minh Ngoc",
editor = "Demner-Fushman, Dina and
Ananiadou, Sophia and
Miwa, Makoto and
Roberts, Kirk and
Tsujii, Junichi",
booktitle = "Proceedings of the 23rd Workshop on Biomedical Natural Language Processing",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.bionlp-1.65/",
doi = "10.18653/v1/2024.bionlp-1.65",
pages = "731--739",
    abstract = "This paper presents our proposed approach to the Discharge Me! shared task, co-located with the 23rd Workshop on Biomedical Natural Language Processing (BioNLP). In this work, we develop an LLM-based framework for solving the Discharge Summary Documentation (DSD) task, i.e., generating the two critical target sections `Brief Hospital Course' and `Discharge Instructions' in the discharge summary. By streamlining the recent instruction-finetuning process for LLMs, we explore several prompting strategies for optimally adapting LLMs to the specific generation task of DSD. Experimental results show that providing a clear output structure, complemented by a set of comprehensive Chain-of-Thought (CoT) questions, effectively improves the model{'}s reasoning capability, thereby enhancing the structural correctness and faithfulness of clinical information in the generated text. Source code is available at: https://anonymous.4open.science/r/Discharge{\_}LLM-A233"
}