@inproceedings{wang-etal-2025-sopl,
title = "{SOPL}: A Sequential Optimal Learning Approach to Automated Prompt Engineering in Large Language Models",
author = "Wang, Shuyang and
Moazeni, Somayeh and
Klabjan, Diego",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-luhme/2025.findings-emnlp.1155/",
doi = "10.18653/v1/2025.findings-emnlp.1155",
pages = "21172--21185",
ISBN = "979-8-89176-335-7",
    abstract = "Designing effective prompts is essential to guiding large language models (LLMs) toward desired responses. Automated prompt engineering aims to reduce reliance on manual efforts by streamlining the design, refinement, and optimization of natural language prompts. This paper proposes an optimal learning framework for automated prompt engineering for black-box models, designed to sequentially identify effective prompt features under limited evaluation budgets. We introduce a feature-based method to express prompt templates, which significantly broadens the search space. Bayesian regression is employed to utilize correlations among similar prompts, accelerating the learning process. To efficiently explore the large space of prompt features, we adopt the forward-looking Knowledge-Gradient (KG) policy for sequential optimal learning; the KG policy is computed efficiently by solving mixed-integer second-order cone optimization problems, making it scalable and capable of accommodating prompts characterized only through constraints. Our method significantly outperforms a set of benchmark strategies assessed on instruction induction tasks within limited iterations of prompt evaluations, showing the potential of optimal learning for efficient prompt learning."
}

Markdown (Informal)
[SOPL: A Sequential Optimal Learning Approach to Automated Prompt Engineering in Large Language Models](https://preview.aclanthology.org/ingest-luhme/2025.findings-emnlp.1155/) (Wang et al., Findings 2025)
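The abstract describes the approach at a high level: prompt templates are expressed through feature vectors, Bayesian regression shares information across similar prompts, and a forward-looking Knowledge-Gradient (KG) policy selects the next prompt to evaluate under a limited budget. The sketch below is a minimal, hypothetical illustration of such a loop, not the authors' implementation: the binary feature encoding, the `evaluate` scoring oracle, and the Monte Carlo KG estimate over an enumerated candidate set are all assumptions made for illustration, whereas the paper computes the KG step by solving mixed-integer second-order cone programs over a constrained feature space.

```python
# Illustrative sketch (assumed details, not the paper's implementation):
# Bayesian linear regression over binary prompt-feature vectors combined with a
# one-step knowledge-gradient selection loop over an enumerated candidate set.
import numpy as np

rng = np.random.default_rng(0)

# Candidate prompt templates encoded as binary feature vectors (assumed encoding).
n_features = 6
candidates = rng.integers(0, 2, size=(40, n_features)).astype(float)

# Gaussian prior over feature weights and observation noise for the score model.
prior_mean = np.zeros(n_features)
prior_cov = np.eye(n_features)
noise_var = 0.25

def posterior(X, y):
    """Closed-form Gaussian posterior for the feature weights."""
    if len(y) == 0:
        return prior_mean, prior_cov
    prec = np.linalg.inv(prior_cov) + X.T @ X / noise_var
    cov = np.linalg.inv(prec)
    mean = cov @ (np.linalg.inv(prior_cov) @ prior_mean + X.T @ y / noise_var)
    return mean, cov

def kg_value(x, mean, cov):
    """One-step KG value of evaluating prompt x: expected improvement in the best
    posterior-mean score over all candidates after observing a noisy evaluation."""
    best_now = (candidates @ mean).max()
    # Predictive update direction (the sigma-tilde vector from the KG literature).
    s = cov @ x / np.sqrt(x @ cov @ x + noise_var)
    zs = rng.standard_normal(64)  # Monte Carlo over the unknown outcome
    future_best = np.array([(candidates @ (mean + z * s)).max() for z in zs])
    return future_best.mean() - best_now

def evaluate(x):
    """Stand-in for querying the black-box LLM and scoring its response."""
    true_w = np.array([1.0, -0.5, 0.8, 0.2, 0.0, 0.6])  # hypothetical ground truth
    return x @ true_w + rng.normal(scale=np.sqrt(noise_var))

X_obs, y_obs = np.empty((0, n_features)), np.empty(0)
for t in range(10):  # limited evaluation budget
    mean, cov = posterior(X_obs, y_obs)
    scores = [kg_value(x, mean, cov) for x in candidates]
    x_next = candidates[int(np.argmax(scores))]
    y_next = evaluate(x_next)
    X_obs = np.vstack([X_obs, x_next])
    y_obs = np.append(y_obs, y_next)

mean, _ = posterior(X_obs, y_obs)
print("best prompt features:", candidates[int(np.argmax(candidates @ mean))])
```

The enumeration over `candidates` is what the mixed-integer second-order cone formulation in the paper avoids: it lets the KG step search an implicitly defined, constraint-characterized feature space rather than a small explicit list.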