@inproceedings{li-etal-2025-self,
title = "Self-Instructed Derived Prompt Generation Meets In-Context Learning: Unlocking New Potential of Black-Box {LLM}s",
author = "Li, Zhuo and
Du, Yuhao and
Hu, Jinpeng and
Wan, Xiang and
Gao, Anningzhe",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.92/",
pages = "1840--1857",
ISBN = "979-8-89176-251-0",
abstract = "Improving prompt quality is crucial for enhancing the performance of large language models (LLMs), particularly for Black-Box models like GPT4. Existing prompt refinement methods, while effective, often suffer from semantic inconsistencies between refined and original prompts, and fail to maintain users' real intent. To address these challenges, we propose a self-instructed in-context learning framework that generates reliable derived prompts, keeping semantic consistency with the original prompts. Specifically, our framework incorporates a reinforcement learning mechanism, enabling direct interaction with the response model during prompt generation to better align with human preferences. We then formulate the querying as an in-context learning task, combining responses from LLMs with derived prompts to create a contextual demonstration for the original prompt. This approach effectively enhances alignment, reduces semantic discrepancies, and activates the LLM{'}s in-context learning ability for generating more beneficial response. Extensive experiments demonstrate that the proposed method not only generates better derived prompts but also significantly enhances LLMs' ability to deliver more effective responses, particularly for Black-Box models like GPT4."
}
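
The abstract describes assembling an in-context demonstration from a derived prompt and the black-box model's response to it. The following is a minimal sketch of that demonstration-building step, not the authors' implementation; the helpers `generate_derived_prompt` and `query_llm` are hypothetical placeholders for the paper's RL-trained prompt generator and the black-box response model.

```python
from typing import Callable


def build_demonstration_query(
    original_prompt: str,
    generate_derived_prompt: Callable[[str], str],
    query_llm: Callable[[str], str],
) -> str:
    """Assemble a contextual demonstration for the original prompt (sketch)."""
    # 1. Generate a semantically consistent derived prompt. In the paper this
    #    generator is trained with reinforcement learning against the response
    #    model; here it is an arbitrary callable.
    derived_prompt = generate_derived_prompt(original_prompt)

    # 2. Query the black-box LLM with the derived prompt to obtain a response.
    derived_response = query_llm(derived_prompt)

    # 3. Use the (derived prompt, response) pair as a one-shot in-context
    #    demonstration preceding the original prompt.
    return (
        f"Example question: {derived_prompt}\n"
        f"Example answer: {derived_response}\n\n"
        f"Question: {original_prompt}\n"
        f"Answer:"
    )
```

The exact demonstration template above is illustrative; the key idea from the abstract is that the derived prompt and its response are presented as context so the model's in-context learning ability is activated when answering the original prompt.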