@inproceedings{mao-etal-2025-temporalizing,
title = "Temporalizing Confidence: Evaluation of Chain-of-Thought Reasoning with Signal Temporal Logic",
author = "Mao, Zhenjiang and
Bisliouk, Artem and
Nama, Rohith and
Ruchkin, Ivan",
editor = {Kochmar, Ekaterina and
Alhafni, Bashar and
Bexte, Marie and
Burstein, Jill and
Horbach, Andrea and
Laarmann-Quante, Ronja and
Tack, Ana{\"i}s and
Yaneva, Victoria and
Yuan, Zheng},
booktitle = "Proceedings of the 20th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.bea-1.65/",
pages = "882--890",
ISBN = "979-8-89176-270-1",
abstract = "Large Language Models (LLMs) have shown impressive performance in mathematical reasoning tasks when guided by Chain-of-Thought (CoT) prompting. However, they tend to produce highly confident yet incorrect outputs, which poses significant risks in domains like education, where users may lack the expertise to assess reasoning steps. To address this, we propose a structured framework that models stepwise confidence as a temporal signal and evaluates it using Signal Temporal Logic (STL). In particular, we define formal STL-based constraints to capture desirable temporal properties and compute robustness scores that serve as structured, interpretable confidence estimates. Our approach also introduces a set of uncertainty reshaping strategies to enforce smoothness, monotonicity, and causal consistency across the reasoning trajectory. Experiments show that our approach consistently improves calibration metrics and provides more reliable uncertainty estimates than conventional confidence aggregation and post-hoc calibration."
}
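For readers unfamiliar with Signal Temporal Logic robustness, which the abstract uses as a confidence score over chain-of-thought steps, the following is a minimal illustrative sketch in Python. It is not the authors' implementation: the thresholds, property choices, and function names are hypothetical examples of the general STL robustness semantics (robustness of "globally, predicate holds" over a finite trace is the minimum margin of the predicate across time steps).

# Minimal sketch (not the paper's code): scoring a stepwise confidence
# trajectory against simple STL-style properties. Thresholds are
# hypothetical example values.

def robustness_always_above(confidences, tau=0.7):
    """Robustness of G(conf >= tau): confidence stays above tau at every step.
    Positive robustness means the property holds; magnitude is the margin."""
    return min(c - tau for c in confidences)

def robustness_bounded_drop(confidences, max_drop=0.2):
    """Robustness of G(conf_{t+1} - conf_t >= -max_drop): no single reasoning
    step may lower confidence by more than max_drop."""
    diffs = [b - a for a, b in zip(confidences, confidences[1:])]
    return min(d + max_drop for d in diffs)

if __name__ == "__main__":
    # Made-up per-step confidences from a chain-of-thought trace.
    trace = [0.92, 0.88, 0.85, 0.55, 0.60]
    print(robustness_always_above(trace))   # -0.15: dips below 0.7 at step 4
    print(robustness_bounded_drop(trace))   # -0.10: one step drops by 0.30 > 0.20

A negative robustness value flags a violated property (e.g., a sudden confidence collapse mid-reasoning), which is the kind of structured, interpretable signal the abstract describes.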