@inproceedings{choi-etal-2025-cac,
title = "{CAC}-{C}o{T}: Connector-Aware Compact Chain-of-Thought for Efficient Reasoning Data Synthesis Across Dual-System Cognitive Tasks",
author = "Choi, Sunguk and
Kwon, Yonghoon and
Lee, Heondeuk",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.1062/",
doi = "10.18653/v1/2025.findings-emnlp.1062",
pages = "19515--19530",
ISBN = "979-8-89176-335-7",
abstract = "Long chain-of-thought (CoT) prompting helps Large Language Models (LLMs) solve difficult problems, but very long traces often slow or even degrade performance on fast, intuitive ``System-1'' tasks. We introduce Connector-Aware Compact CoT (CAC-CoT) {---} a method that deliberately restricts reasoning to a small, fixed set of connector phrases, steering the model toward concise and well {---} structured explanations. Despite its simplicity, our synthetic method with general-purpose LLMs yields a high-quality training quality. CAC-CoT achieves $\approx$ 85{\%} on GSM8K and $\approx$ 40{\%} on GPQA (System-2) while also achieving $\approx$ 85{\%} on S1-Bench (System-1), surpassing the baseline by over 20{\%}. Its reasoning traces average $\approx$ 300 tokens(ART), about one-third the length of baseline traces, delivering higher efficiency without loss of accuracy."
}

Markdown (Informal):
[CAC-CoT: Connector-Aware Compact Chain-of-Thought for Efficient Reasoning Data Synthesis Across Dual-System Cognitive Tasks](https://aclanthology.org/2025.findings-emnlp.1062/) (Choi et al., Findings 2025)