@inproceedings{tikhonov-etal-2026-confidence,
  author    = {Tikhonov, Pavel and Oseledets, Ivan and Tutubalina, Elena},
  title     = {Confidence Leaps in {LLM} Reasoning: Early Stopping and Cross-Model Transfer},
  editor    = {Demberg, Vera and Inui, Kentaro and Marquez, Llu{\'i}s},
  booktitle = {Proceedings of the 19th Conference of the {European} Chapter of the {Association} for {Computational} {Linguistics} (Volume 2: Short Papers)},
  month     = mar,
  year      = {2026},
  address   = {Rabat, Morocco},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/ingest-eacl/2026.eacl-short.43/},
  pages     = {602--616},
  isbn      = {979-8-89176-381-4},
  abstract  = {We challenge the common assumption that Large Language Models (LLMs) build confidence gradually during reasoning. Instead, we find that conviction is often reached in a discrete ``moment of insight'', characterized by a sudden and sharp increase in an answer{'}s probability---a phenomenon we term a ``confidence leap''. Leveraging this discovery, we introduce a training-free, model-agnostic early-stopping heuristic that halts generation upon detecting such a leap, significantly reducing the generation length without sacrificing accuracy. We also demonstrate that the reasoning text leading up to this leap is semantically potent and transferable: feeding this partial reasoning to a different model family substantially boosts its performance. This suggests that the ``confidence leap'' marks a shared, interpretable reasoning milestone, not just a model-specific statistical artifact.},
}
Markdown (Informal)
@comment{Informal markdown citation (from the ACL Anthology page), kept for reference:
[Confidence Leaps in LLM Reasoning: Early Stopping and Cross-Model Transfer](https://preview.aclanthology.org/ingest-eacl/2026.eacl-short.43/) (Tikhonov et al., EACL 2026)
ACL
}