@inproceedings{lanz-pecina-2025-cuni,
title = "{CUNI}-a at {A}rch{EHR}-{QA} 2025: Do we need Giant {LLM}s for Clinical {QA}?",
author = "Lanz, Vojtech and
Pecina, Pavel",
editor = "Soni, Sarvesh and
Demner-Fushman, Dina",
booktitle = "BioNLP 2025 Shared Tasks",
month = aug,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/acl25-workshop-ingestion/2025.bionlp-share.4/",
pages = "27--40",
ISBN = "979-8-89176-276-3",
abstract = "In this paper, we present our submission to the ArchEHR-QA 2025 shared task, which focuses on answering patient questions based on excerpts from electronic health record (EHR) discharge summaries. Our approach identifies essential sentences relevant to a patient{'}s question using a combination of few-shot inference with the Med42-8B model, cosine similarity over clinical term embeddings, and the MedCPT cross-encoder relevance model. Then, concise answers are generated on the basis of these selected sentences. Despite not relying on large language models (LLMs) with tens of billions of parameters, our method achieves competitive results, demonstrating the potential of resource-efficient solutions for clinical NLP applications."
}