@inproceedings{jain-etal-2026-casper,
  title     = {{CASPER}: Bridging Discrete and Continuous Prompt Optimization through Feedback-Guided Gradient Descent},
  author    = {Jain, Aryan and
               Ghosh, Pushpendu and
               Yenigalla, Promod},
  editor    = {Matusevych, Yevgen and
               Eryi{\u{g}}it, G{\"u}l{\c{s}}en and
               Aletras, Nikolaos},
  booktitle = {Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 5: Industry Track)},
  month     = mar,
  year      = {2026},
  address   = {Rabat, Morocco},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/ingest-eacl/2026.eacl-industry.32/},
  pages     = {425--437},
  isbn      = {979-8-89176-384-5},
  abstract  = {Workflow automation is critical for reducing manual efforts in industries, yet existing pipelines fail to handle generative tasks like summarization and extraction without pre-built tools, forcing human intervention. While LLM-based agents offer solutions, their creation depends heavily on prompt engineering{---}a resource-intensive process often yielding suboptimal results. Current automated approaches face a fundamental trade-off: discrete optimization produces overfitted prompts without convergence guarantees due to non-convex landscapes, while continuous gradient-based methods generate semantically incoherent prompts through embedding optimization. We propose CASPER, a framework bridging discrete and continuous prompt optimization through feedback-guided gradient descent in embedding space. CASPER employs a feedback module producing detailed error analyses that capture failure modes as optimization signals. These insights are projected with prompt tokens into embedding space to steer gradient descent. To preserve interpretability, we incorporate fluency regularization that penalizes incomprehensible tokens. We further accelerate convergence through synthetic data generation that oversamples failure cases, while also addressing data scarcity in industrial settings. We evaluate CASPER on WDC, DROP, GSM8K with F1 improvements of 2.3{\%}, 1.6{\%}, 2.3{\%} and VQA, internal benchmarks showing accuracy improvements of 1.1{\%}, 3{\%}, demonstrating cross-domain generalizability.},
}
Markdown (Informal)
[CASPER: Bridging Discrete and Continuous Prompt Optimization through Feedback-Guided Gradient Descent](https://preview.aclanthology.org/ingest-eacl/2026.eacl-industry.32/) (Jain et al., EACL 2026)
ACL