@inproceedings{muntean-betts-2025-addressing,
title = "Addressing Few-Shot {LLM} Classification Instability Through Explanation-Augmented Distillation",
author = "Muntean, William and
Betts, Joe",
editor = "Wilson, Joshua and
Ormerod, Christopher and
Beiting Parrish, Magdalen",
booktitle = "Proceedings of the Artificial Intelligence in Measurement and Education Conference (AIME-Con): Works in Progress",
month = oct,
year = "2025",
address = "Wyndham Grand Pittsburgh, Downtown, Pittsburgh, Pennsylvania, United States",
publisher = "National Council on Measurement in Education (NCME)",
url = "https://preview.aclanthology.org/name-variant-enfa-fane/2025.aimecon-wip.24/",
pages = "197--203",
ISBN = "979-8-218-84229-1",
    abstract = "This study compares explanation-augmented knowledge distillation with few-shot in-context learning for LLM-based exam question classification. Fine-tuned smaller language models achieved competitive performance with greater consistency than large-model few-shot approaches, which exhibited notable variability across different examples. Hyperparameter selection proved essential, with extremely low learning rates significantly impairing model performance."
}