@inproceedings{wu-etal-2023-ikm,
    title     = "{IKM\_Lab} at {BioLaySumm} Task 1: {Longformer}-based Prompt Tuning for Biomedical Lay Summary Generation",
    author    = "Wu, Yu-Hsuan and
                 Lin, Ying-Jia and
                 Kao, Hung-Yu",
    editor    = "Demner-Fushman, Dina and
                 Ananiadou, Sophia and
                 Cohen, Kevin",
    booktitle = "The 22nd Workshop on Biomedical Natural Language Processing and {BioNLP} Shared Tasks",
    month     = jul,
    year      = "2023",
    address   = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2023.bionlp-1.64/",
    doi       = "10.18653/v1/2023.bionlp-1.64",
    pages     = "602--610",
    abstract  = "This paper describes the entry by the Intelligent Knowledge Management (IKM) Laboratory in the BioLaySumm 2023 task1. We aim to transform lengthy biomedical articles into concise, reader-friendly summaries that can be easily comprehended by the general public. We utilized a long-text abstractive summarization longformer model and experimented with several prompt methods for this task. Our entry placed 10th overall, but we were particularly proud to achieve a 3rd place score in the readability evaluation metric."
}
@comment{
  Markdown (Informal):
  [IKM_Lab at BioLaySumm Task 1: Longformer-based Prompt Tuning for Biomedical Lay Summary Generation](https://aclanthology.org/2023.bionlp-1.64/) (Wu et al., BioNLP 2023)
  ACL
}