@inproceedings{wang-etal-2025-stepwise-informativeness,
title = "Stepwise Informativeness Search for Improving {LLM} Reasoning",
author = "Wang, Siyuan and
Zhao, Enda and
Ren, Xiang",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1285/",
pages = "25291--25309",
ISBN = "979-8-89176-332-6",
abstract = "Advances in Large Language Models (LLMs) have improved multi-step reasoning by generating free-text rationales, but these models tend to lose focus over the middle of long contexts. This raises concerns that as reasoning progresses, LLMs may overlook information in earlier steps when decoding subsequent steps, leading to unreliable and redundant rationales. To address this, we propose guiding LLMs to generate more accurate and concise rationales by (1) proactively referencing information from underutilized prior steps, and (2) minimizing redundant information between new and existing steps. We introduce \textit{stepwise informativeness search}, an inference-time tree search framework incorporating two selection heuristics: grounding-guided selection which prioritizes steps paying higher attention over underutilized steps; and novelty-guided selection which encourages steps with novel conclusions. We further utilize a self-grounding strategy that prompts LLMs to explicitly reference relevant prior steps as premises before deduction at each step, mitigating distraction from irrelevant content. Experiments on five reasoning datasets across five LLMs show the effectiveness and efficiency of our approach to improve reasoning with reduced errors and redundancy."
}