@inproceedings{xu-etal-2025-softcot,
title = "{S}oft{C}o{T}: Soft Chain-of-Thought for Efficient Reasoning with {LLM}s",
author = "Xu, Yige and
Guo, Xu and
Zeng, Zhiwei and
Miao, Chunyan",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.1137/",
pages = "23336--23351",
ISBN = "979-8-89176-251-0",
abstract = "Chain-of-Thought (CoT) reasoning enables Large Language Models (LLMs) to solve complex reasoning tasks by generating intermediate reasoning steps. However, most existing approaches focus on hard token decoding, which constrains reasoning within the discrete vocabulary space and may not always be optimal. While recent efforts explore continuous-space reasoning, they often require full-model fine-tuning and suffer from catastrophic forgetting, limiting their applicability to state-of-the-art LLMs that already perform well in zero-shot settings with a proper instruction. To address this challenge, we propose a novel approach for continuous-space reasoning that does not require modifying the LLM. Specifically, we employ a lightweight fixed assistant model to speculatively generate instance-specific soft thought tokens as the initial chain of thoughts, which are then mapped into the LLM{'}s representation space via a trainable projection module. Experimental results on five reasoning benchmarks demonstrate that our method enhances LLM reasoning performance through supervised, parameter-efficient fine-tuning. Source code is available at https://github.com/xuyige/SoftCoT."
}
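The abstract describes the core mechanism: a small frozen assistant model produces instance-specific soft thought tokens, and only a projection module that maps them into the LLM's representation space is trained. Below is a minimal illustrative sketch of that projection step; the class name, dimensions, and tensor shapes are assumptions for illustration, not the authors' implementation (see https://github.com/xuyige/SoftCoT for the actual source code).

```python
# Illustrative sketch of the SoftCoT projection idea; names and shapes are
# hypothetical, not taken from the authors' repository.
import torch
import torch.nn as nn

class SoftThoughtProjector(nn.Module):
    """Maps hidden states from a small, frozen assistant model into the
    embedding space of a larger, frozen LLM (the only trainable component)."""
    def __init__(self, assistant_dim: int, llm_dim: int):
        super().__init__()
        self.proj = nn.Linear(assistant_dim, llm_dim)

    def forward(self, assistant_hidden: torch.Tensor) -> torch.Tensor:
        # assistant_hidden: [batch, num_soft_tokens, assistant_dim]
        return self.proj(assistant_hidden)  # -> [batch, num_soft_tokens, llm_dim]

# Usage (shapes illustrative): projected soft thoughts are prepended to the
# LLM's prompt embeddings so the frozen LLM conditions on continuous reasoning cues.
projector = SoftThoughtProjector(assistant_dim=768, llm_dim=4096)
assistant_hidden = torch.randn(2, 4, 768)    # soft-thought states from the frozen assistant
soft_thoughts = projector(assistant_hidden)  # [2, 4, 4096]
question_embeds = torch.randn(2, 32, 4096)   # frozen LLM's embeddings of the question
llm_inputs_embeds = torch.cat([soft_thoughts, question_embeds], dim=1)
```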