@inproceedings{xu-etal-2025-collaborative,
title = "Collaborative Beam Search: Enhancing {LLM} Reasoning via Collective Consensus",
author = "Xu, Yangyifan and
Ren, Shuo and
Zhang, Jiajun",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.574/",
pages = "11409--11421",
ISBN = "979-8-89176-332-6",
abstract = "Complex multi-step reasoning remains challenging for large language models (LLMs). While parallel inference-time scaling methods, such as step-level beam search, offer a promising solution, existing approaches typically depend on either domain-specific external verifiers, or self-evaluation which is brittle and prompt-sensitive. To address these issues, we propose Collaborative Beam Search (CBS), an iterative framework that harnesses the collective intelligence of multiple LLMs across both generation and verification stages. For generation, CBS leverages multiple LLMs to explore a broader search space, resulting in more diverse candidate steps. For verifications, CBS employs a perplexity-based collective consensus among these models, eliminating reliance on an external verifier or complex prompts. Between iterations, CBS leverages a dynamic quota allocation strategy that reassigns generation budget based on each model{'}s past performance, striking a balance between candidate diversity and quality. Experimental results on six tasks across arithmetic, logical, and commonsense reasoning show that CBS outperforms single{-}model scaling and multi-model ensemble baselines by over 4 percentage points in average accuracy, demonstrating its effectiveness and general applicability."
}