@inproceedings{choi-etal-2026-llm,
title = "{LLM} Plug-ins Are Not a Free Lunch for Clinical Time-Series Prediction",
author = "Choi, Juhwan and
Lee, Kwanhyung and
Hahn, Sangchul and
Yang, Eunho",
editor = {Danilova, Vera and
Kurfal{\i}, Murathan and
S{\"o}derfeldt, Ylva and
Reed, Julia and
Burchell, Andrew},
booktitle = "Proceedings of the 1st Workshop on Linguistic Analysis for Health ({H}ea{L}ing 2026)",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-eacl/2026.healing-1.17/",
pages = "203--211",
ISBN = "979-8-89176-367-8",
abstract = "Inspired by recent plug-in frameworks that repurpose frozen layers from large language models (LLMs) as inductive priors, we explore whether such mechanisms can be extended to clinical time-series prediction without textual inputs or LLM fine-tuning. We introduce a lightweight plug-in architecture that inserts a single frozen LLM Transformer layer between an aggregated time-series representation and the prediction head. Unlike prior work focused on vision or language tasks, our study targets clinical time-series data, where LLMs typically underperform when applied directly.Experiments on two ICU prediction tasks from MIMIC-III show that the proposed plug-in exhibits heterogeneous effects across different backbones and tasks, with occasional performance improvements and minimal computational overhead. We further compare general-purpose and medical-domain LLM layers under an identical plug-in setting, analyzing how domain specialization interacts with clinical time-series models. Overall, our results highlight important limitations of frozen LLM plug-ins and motivate future work on understanding the conditions under which such layers may be beneficial."
}