@inproceedings{choo-hong-2026-modulating,
  title     = {Modulating Multi-Label Tendency in Zero-Shot {LLM} Coding: The Effect of Output Structure on {CDSS} Feedback Analysis},
  author    = {Choo, Hyunwoo and
               Hong, Sungsoo},
  editor    = {Danilova, Vera and
               Kurfal{\i}, Murathan and
               S{\"o}derfeldt, Ylva and
               Reed, Julia and
               Burchell, Andrew},
  booktitle = {Proceedings of the 1st Workshop on Linguistic Analysis for Health ({HeaLing} 2026)},
  month     = mar,
  year      = {2026},
  address   = {Rabat, Morocco},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/ingest-eacl/2026.healing-1.14/},
  pages     = {172--179},
  isbn      = {979-8-89176-367-8},
  abstract  = {Large language models (LLMs) often default to single-label classification in zero-shot multi-label tasks{---}a tendency we term ``conservative default''. While few-shot prompting mitigates this, it introduces ``example bias''. We evaluate zero-shot strategies to modulate this tendency using 1,441 healthcare feedback records and two LLMs. We compare instruction-based methods with structural constraints that modify the token generation sequence, specifically an Enum-First format requiring domain enumeration before selection. Results show that structural constraints substantially reduce single-label rates (Magistral: 96{\%} {\textrightarrow} 19{\%}; Qwen3: 54{\%} {\textrightarrow} 0.0{\%}), though the latter suggests potential over-correction compared to human baselines (16.7{--}41.3{\%}). These findings indicate that while output structure is a potent modulator of classification behavior by shifting the decision point upstream, its effect magnitude is model-dependent, necessitating empirical calibration to prevent spurious associations.},
  internal-note = {url is a temporary ACL Anthology preview/ingest link; replace with the canonical https://aclanthology.org/ URL (and add doi if assigned) once the volume is published},
}
Markdown (Informal)
@comment{Informal Markdown citation copied from the ACL Anthology page:
[Modulating Multi-Label Tendency in Zero-Shot LLM Coding: The Effect of Output Structure on CDSS Feedback Analysis](https://preview.aclanthology.org/ingest-eacl/2026.healing-1.14/) (Choo & Hong, HeaLing 2026)
ACL}