@inproceedings{antony-etal-2025-lightweight,
title = "Lightweight {LLM} Adaptation for Medical Summarisation: Roux-lette at {P}er{A}ns{S}umm Shared Task",
author = "Antony, Anson and
Vickers, Peter and
Wendelken, Suzanne",
editor = "Ananiadou, Sophia and
Demner-Fushman, Dina and
Gupta, Deepak and
Thompson, Paul",
booktitle = "Proceedings of the Second Workshop on Patient-Oriented Language Processing (CL4Health)",
month = may,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.cl4health-1.35/",
pages = "389--397",
ISBN = "979-8-89176-238-1",
abstract = "The PerAnsSumm Shared Task at CL4Health@NAACL 2025 focused on Perspective-Aware Summarization of Healthcare Q/A forums, requiring participants to extract and summarize spans based on predefined perspective categories. Our approach leveraged LLM-based zero-shot prompting enhanced by semantically-similar In-Context Learning (ICL) examples. Using Qwen-Turbo with 20 exemplar samples retrieved through NV-Embed-v2 embeddings, we achieved a mean score of 0.58 on Task A (span identification) and Task B (summarization) mean scores of 0.36 in Relevance and 0.28 in Factuality, finishing 12th on the final leaderboard. Notably, our system achieved higher precision in strict matching (0.20) than the top-performing system, demonstrating the effectiveness of our post-processing techniques. In this paper, we detail our ICL approach for adapting Large Language Models to Perspective-Aware Medical Summarization, analyze the improvements across development iterations, and finally discuss both the limitations of the current evaluation framework and future challenges in modeling this task. We release our code for reproducibility."
}