@inproceedings{pandian-etal-2025-snap,
title = "Snap Out of It: A Dual-Process Approach to Mitigating Overthinking in Language Model Reasoning",
author = "Pandian, Ashish and
Lojo, Nelson and
Lai, Wei Xun and
Lukas, Jackson",
editor = "Kamalloo, Ehsan and
Gontier, Nicolas and
Lu, Xing Han and
Dziri, Nouha and
Murty, Shikhar and
Lacoste, Alexandre",
booktitle = "Proceedings of the 1st Workshop for Research on Agent Language Models (REALM 2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.realm-1.16/",
pages = "228--249",
ISBN = "979-8-89176-264-0",
abstract = "Large Language Models (LLMs) have shown impressive capabilities in text generation and reasoning but still struggle with overthinking and analysis paralysis in interactive, multi-step tasks. In this paper, we introduce two complementary contributions aimed at mitigating these challenges. First, we propose Think, Validate, Consensus (TVC){---}a multi-agent system inspired by Rational Speech Act (RSA) theory{---}that enables LLMs to recursively model each other{'}s mental states and detect overthinking in interactive environments. We take inspiration from RSA to model the recursive reasoning about communicative intent that underlies human collaboration, complementing models of individual reasoning. Second, we present Snap-Think, a dual-mode mechanism that combines fast, intuitive interaction (System 1) with slower, deliberative reasoning (System 2) to break free from reasoning loops detected by TVC. We evaluate our approach using New York Times Connections puzzles and demonstrate significant improvements: Snap-Think achieves 98{\%} solve rate on GPT-4o compared to Chain-of-Thought{'}s 72{\%}, while maintaining superior semantic grounding and efficiency over traditional strategies. Our findings suggest that integrating human-inspired cognitive frameworks into LLM architectures can effectively counteract overthinking and enhance complex problem-solving capabilities. We make our code available at: https://github.com/Chrislai502/the{\_}amazing{\_}connections"
}