@inproceedings{abdelmalak-2025-abdelmalak,
  title     = {Abdelmalak at {PerAnsSumm} 2025: Leveraging a Domain-Specific {BERT} and {LLaMA} for Perspective-Aware Healthcare Answer Summarization},
  author    = {Abdelmalak, Abanoub},
  editor    = {Ananiadou, Sophia and
               Demner-Fushman, Dina and
               Gupta, Deepak and
               Thompson, Paul},
  booktitle = {Proceedings of the Second Workshop on Patient-Oriented Language Processing (CL4Health)},
  month     = may,
  year      = {2025},
  address   = {Albuquerque, New Mexico},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.cl4health-1.39/},
  pages     = {428--436},
  isbn      = {979-8-89176-238-1},
  abstract  = {The PerAnsSumm Shared Task - CL4Health@NAACL 2025 aims to enhance healthcare community question-answering (CQA) by summarizing diverse user perspectives. It consists of two tasks: identifying and classifying perspective-specific spans (Task A) and generating structured, perspective-specific summaries from question-answer threads (Task B). The dataset used for this task is the PUMA dataset. For Task A, a COVID-Twitter-BERT model pre-trained on COVID-related text from Twitter was employed, improving the model{'}s understanding of relevant vocabulary and context. For Task B, LLaMA was utilized in a prompt-based fashion. The proposed approach achieved 9th place in Task A and 16th place overall, with the best proportional classification F1-score of 0.74.},
}
@comment{
Markdown (Informal)
[Abdelmalak at PerAnsSumm 2025: Leveraging a Domain-Specific BERT and LLaMA for Perspective-Aware Healthcare Answer Summarization](https://aclanthology.org/2025.cl4health-1.39/) (Abdelmalak, CL4Health 2025)
ACL
}