@inproceedings{joshi-etal-2025-calibration,
  title     = {Calibration Across Layers: Understanding Calibration Evolution in {LLM}s},
  author    = {Joshi, Abhinav and
               Ahmad, Areeb and
               Modi, Ashutosh},
  editor    = {Christodoulopoulos, Christos and
               Chakraborty, Tanmoy and
               Rose, Carolyn and
               Peng, Violet},
  booktitle = {Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing},
  month     = nov,
  year      = {2025},
  address   = {Suzhou, China},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.emnlp-main.742/},
  pages     = {14697--14725},
  isbn      = {979-8-89176-332-6},
  abstract  = {Large Language Models (LLMs) have demonstrated inherent calibration capabilities, where predicted probabilities align well with correctness, despite prior findings that deep neural networks are often overconfident. Recent studies have linked this behavior to specific components in the final layer, such as entropy neurons and the unembedding matrix{'}s null space. In this work, we provide a complementary perspective by investigating how calibration evolves throughout the network{'}s depth. Analyzing multiple open-weight models on the MMLU benchmark, we uncover a distinct \textit{confidence correction phase} in the upper/later layers, where model confidence is actively recalibrated after decision certainty has been reached. Furthermore, we identify a low-dimensional \textit{calibration direction} in the residual stream whose perturbation significantly improves calibration metrics (ECE and MCE) without harming accuracy. Our findings suggest that calibration is a distributed phenomenon, shaped throughout the network{'}s forward pass, not just in its final projection, providing new insights into how confidence-regulating mechanisms operate within LLMs.},
}
@comment{Markdown (Informal):
[Calibration Across Layers: Understanding Calibration Evolution in LLMs](https://aclanthology.org/2025.emnlp-main.742/) (Joshi et al., EMNLP 2025)
ACL}