@inproceedings{suri-etal-2023-healthmavericks,
  title     = {{HealthMavericks}@{MEDIQA}-Chat 2023: Benchmarking different
               {Transformer} based models for {Clinical} {Dialogue} {Summarization}},
  author    = {Suri, Kunal and
               Saha, Saumajit and
               Singh, Atul},
  editor    = {Naumann, Tristan and
               Ben Abacha, Asma and
               Bethard, Steven and
               Roberts, Kirk and
               Rumshisky, Anna},
  booktitle = {Proceedings of the 5th Clinical Natural Language Processing Workshop},
  month     = jul,
  year      = {2023},
  address   = {Toronto, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.clinicalnlp-1.50/},
  doi       = {10.18653/v1/2023.clinicalnlp-1.50},
  pages     = {472--489},
  abstract  = {In recent years, we have seen many Transformer based models being created to address Dialog Summarization problem. While there has been a lot of work on understanding how these models stack against each other in summarizing regular conversations such as the ones found in DialogSum dataset, there haven't been many analysis of these models on Clinical Dialog Summarization. In this article, we describe our solution to MEDIQA-Chat 2023 Shared Tasks as part of ACL-ClinicalNLP 2023 workshop which benchmarks some of the popular Transformer Architectures such as BioBart, Flan-T5, DialogLED, and OpenAI GPT3 on the problem of Clinical Dialog Summarization. We analyse their performance on two tasks - summarizing short conversations and long conversations. In addition to this, we also benchmark two popular summarization ensemble methods and report their performance.},
}
@comment{
Markdown (Informal)
[HealthMavericks@MEDIQA-Chat 2023: Benchmarking different Transformer based models for Clinical Dialogue Summarization](https://aclanthology.org/2023.clinicalnlp-1.50/) (Suri et al., ClinicalNLP 2023)
ACL
}