@inproceedings{zhai-etal-2025-optimizing,
  title     = {Optimizing Reasoning for Text-to-{SQL} with Execution Feedback},
  author    = {Zhai, Bohan and
               Xu, Canwen and
               He, Yuxiong and
               Yao, Zhewei},
  editor    = {Che, Wanxiang and
               Nabende, Joyce and
               Shutova, Ekaterina and
               Pilehvar, Mohammad Taher},
  booktitle = {Findings of the Association for Computational Linguistics: ACL 2025},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.findings-acl.982/},
  pages     = {19206--19218},
  isbn      = {979-8-89176-256-5},
  abstract  = {Text-to-SQL demands precise reasoning to convert natural language questions into structured queries. While large language models (LLMs) excel in many reasoning tasks, their ability to leverage Chain-of-Thought (CoT) reasoning for text-to-SQL remains underexplored. We identify critical limitations: zero-shot CoT offers minimal gains, and Direct Preference Optimization (DPO) applied without CoT yields marginal improvements. We propose ExCoT-DPO, a novel framework that iteratively optimizes open-source LLMs by combining CoT reasoning with off-policy and on-policy DPO, relying solely on execution accuracy as feedback. This approach eliminates the need for reward models or human-annotated preferences. Our experimental results demonstrate significant performance gains: ExCoT-DPO improves execution accuracy on BIRD from 57.37{\%} to 68.51{\%} and on Spider from 78.81{\%} to 86.59{\%} for LLaMA-3 70B, with Qwen-2.5-Coder demonstrating similar improvements. Our best model achieves state-of-the-art performance in the single-model setting on both BIRD and Spider datasets.},
}
Markdown (Informal)
[Optimizing Reasoning for Text-to-SQL with Execution Feedback](https://aclanthology.org/2025.findings-acl.982/) (Zhai et al., Findings 2025)
ACL