@inproceedings{cho-etal-2023-discrete,
title = "Discrete Prompt Optimization via Constrained Generation for Zero-shot Re-ranker",
author = "Cho, Sukmin and
Jeong, Soyeong and
Seo, Jeong yeon and
Park, Jong",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Ingest-2025-COMPUTEL/2023.findings-acl.61/",
doi = "10.18653/v1/2023.findings-acl.61",
pages = "960--971",
abstract = "Re-rankers, which order retrieved documents with respect to the relevance score on the given query, have gained attention for the information retrieval (IR) task. Rather than fine-tuning the pre-trained language model (PLM), the large-scale language model (LLM) is utilized as a zero-shot re-ranker with excellent results. While LLM is highly dependent on the prompts, the impact and the optimization of the prompts for the zero-shot re-ranker are not explored yet. Along with highlighting the impact of optimization on the zero-shot re-ranker, we propose a novel discrete prompt optimization method, Constrained Prompt generation (Co-Prompt), with the metric estimating the optimum for re-ranking. Co-Prompt guides the generated texts from PLM toward optimal prompts based on the metric without parameter update. The experimental results demonstrate that Co-Prompt leads to outstanding re-ranking performance against the baselines. Also, Co-Prompt generates more interpretable prompts for humans against other prompt optimization methods."
}
Markdown (Informal)
[Discrete Prompt Optimization via Constrained Generation for Zero-shot Re-ranker](https://aclanthology.org/2023.findings-acl.61/) (Cho et al., Findings 2023)
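
The abstract's core idea, steering a PLM's generation toward prompts that score well under a re-ranking metric without any parameter updates, can be pictured as a guided beam search over prompt tokens. The sketch below is a minimal illustration under assumed details: it uses GPT-2 both as the prompt generator and as a UPR-style zero-shot re-ranker (log-likelihood of the query given the document and candidate prompt), and keeps beams by their average relevance margin on a tiny dev set of (query, positive doc, negative doc) triples. The scoring function, all names, and the hyperparameters are this sketch's assumptions, not the paper's exact algorithm.

```python
# Hypothetical sketch of discrete prompt optimization via constrained
# generation, in the spirit of Co-Prompt as described in the abstract.
# Names, the scoring metric, and hyperparameters are illustrative.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
lm = AutoModelForCausalLM.from_pretrained("gpt2")
lm.eval()

@torch.no_grad()
def rerank_score(prompt: str, query: str, doc: str) -> float:
    """Zero-shot relevance proxy: log-likelihood of the query given the
    document followed by the candidate prompt (a UPR-style stand-in for
    the paper's metric)."""
    ctx = tok(doc + " " + prompt + " ", return_tensors="pt").input_ids
    tgt = tok(query, return_tensors="pt").input_ids
    ids = torch.cat([ctx, tgt], dim=1)
    logp = torch.log_softmax(lm(ids).logits[:, :-1], dim=-1)
    # Log-prob of each actual next token, then keep only query positions.
    token_lp = logp.gather(-1, ids[:, 1:].unsqueeze(-1)).squeeze(-1)
    return token_lp[:, ctx.size(1) - 1 :].sum().item()

@torch.no_grad()
def search_prompt(dev_set, seed="Relevant?", beam=4, steps=5, topk=8):
    """Beam search over prompt continuations: the generator LM proposes
    top-k next tokens; beams are ranked by the mean re-ranking margin
    (positive-doc score minus negative-doc score) on dev_set."""
    beams = [(seed, 0.0)]
    for _ in range(steps):
        cands = []
        for text, _ in beams:
            ids = tok(text, return_tensors="pt").input_ids
            nxt = lm(ids).logits[0, -1]  # next-token distribution
            for t in torch.topk(nxt, topk).indices.tolist():
                cand = text + tok.decode([t])
                margin = sum(
                    rerank_score(cand, q, pos) - rerank_score(cand, q, neg)
                    for q, pos, neg in dev_set
                ) / len(dev_set)
                cands.append((cand, margin))
        beams = sorted(cands, key=lambda x: x[1], reverse=True)[:beam]
    return beams[0]

# Usage (illustrative): returns the best-scoring prompt and its margin.
# best, margin = search_prompt(
#     [("who wrote hamlet",
#       "Hamlet is a tragedy written by William Shakespeare.",
#       "The Eiffel Tower is a landmark in Paris.")])
```

Because candidates are kept or pruned purely by the downstream metric, the search never touches model weights, which matches the abstract's "without parameter update" framing; it also explains why the resulting prompts stay human-readable, since every candidate is ordinary text decoded from the generator LM.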