@inproceedings{kim-etal-2026-think,
    title = "Think Just Enough: Leveraging Self-Assessed Confidence for Adaptive Reasoning in Language Models",
    author = "Kim, Junyeob and
      Lee, Sang-goo and
      Kim, Taeuk",
    editor = "Demberg, Vera and
      Inui, Kentaro and
      M{\`a}rquez, Llu{\'\i}s",
    booktitle = "Findings of the {A}ssociation for {C}omputational {L}inguistics: {EACL} 2026",
    month = mar,
    year = "2026",
    address = "Rabat, Morocco",
    publisher = "Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/ingest-eacl/2026.findings-eacl.263/",
    pages = "5000--5006",
    isbn = "979-8-89176-386-9",
    abstract = "Recent reinforcement learning (RL)-trained language models have demonstrated strong performance on complex reasoning tasks by producing long and detailed reasoning traces. However, despite these advancements, they often struggle with finding the right balance in reasoning length: some terminate prematurely before reaching a correct answer (underthinking), while others continue reasoning beyond necessity, leading to inefficiency or even degraded accuracy (overthinking). To address these challenges, we propose a method for optimizing reasoning length via self-assessed confidence. By prompting the model to evaluate its own confidence at intermediate reasoning steps, we enable dynamic stopping once sufficient reasoning is achieved. Experiments across multiple reasoning benchmarks show that our approach improves computational efficiency without compromising answer quality. Furthermore, we find that confidence estimates from RL-trained reasoning models are more reliable than those from standard LLMs, making it a valuable internal signal for controlling reasoning depth."
}

Markdown (Informal)
[Think Just Enough: Leveraging Self-Assessed Confidence for Adaptive Reasoning in Language Models](https://preview.aclanthology.org/ingest-eacl/2026.findings-eacl.263/) (Kim et al., Findings 2026)
ACL