@inproceedings{ye-etal-2024-prompt,
title = "Prompt Engineering a Prompt Engineer",
author = "Ye, Qinyuan and
Ahmed, Mohamed and
Pryzant, Reid and
Khani, Fereshte",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2024",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.findings-acl.21/",
doi = "10.18653/v1/2024.findings-acl.21",
pages = "355--385",
abstract = "Prompt engineering is a challenging yet crucial task for optimizing the performance of large language models on customized tasks. It requires complex reasoning to examine the model`s errors, hypothesize what is missing or misleading in the current prompt, and communicate the task with clarity. While recent works indicate that large language models can be meta-prompted to perform automatic prompt engineering, we argue that their potential is limited due to insufficient guidance for complex reasoning in the meta-prompt. We fill this gap by infusing into the meta-prompt three key components: detailed descriptions, context specification, and a step-by-step reasoning template. The resulting method, named PE2, showcases remarkable versatility across diverse language tasks. It finds prompts that outperform {\textquotedblleft}let`s think step by step{\textquotedblright} by 6.3{\%} on MultiArith and 3.1{\%} on GSM8K, and outperforms competitive baselines on counterfactual tasks by 6.9{\%}. Further, we show that PE2 can make targeted prompt edits, rectify erroneous prompts, and induce multi-step plans for complex tasks."
}
Qinyuan Ye, Mohamed Ahmed, Reid Pryzant, and Fereshte Khani. 2024. Prompt Engineering a Prompt Engineer. In Findings of the Association for Computational Linguistics: ACL 2024, pages 355–385, Bangkok, Thailand. Association for Computational Linguistics.