@inproceedings{kharrat-etal-2025-acing,
    title = "{ACING}: Actor-Critic for Instruction Learning in Black-Box {LLM}s",
    author = "Kharrat, Salma and
      Fourati, Fares and
      Canini, Marco",
    editor = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.emnlp-main.965/",
    doi = "10.18653/v1/2025.emnlp-main.965",
    pages = "19086--19113",
    isbn = "979-8-89176-332-6",
    abstract = "The effectiveness of Large Language Models (LLMs) in solving tasks depends significantly on the quality of their instructions, which often require substantial human effort to craft. This underscores the need for automated instruction optimization. However, optimizing instructions is particularly challenging when working with black-box LLMs, where model parameters and gradients are inaccessible. We introduce ACING, an actor-critic reinforcement learning framework that formulates instruction optimization as a stateless, continuous-action problem, enabling exploration of infinite instruction spaces using only black-box feedback. ACING automatically discovers prompts that outperform human-written prompts in 76{\%} of instruction-induction tasks, with gains of up to 33 points and a 10-point median improvement over the best automatic baseline in 33 tasks spanning instruction-induction, summarization, and chain-of-thought reasoning. Extensive ablations highlight its robustness and efficiency. An implementation of ACING is available at \url{https://github.com/salmakh1/ACING}."
}

@comment{
Markdown (Informal):
[ACING: Actor-Critic for Instruction Learning in Black-Box LLMs](https://aclanthology.org/2025.emnlp-main.965/) (Kharrat et al., EMNLP 2025)
ACL
}