@inproceedings{habba-etal-2025-promptsuite,
    title     = {{P}rompt{S}uite: A Task-Agnostic Framework for Multi-Prompt Generation},
    author    = {Habba, Eliya and
                 Dahan, Noam and
                 Lior, Gili and
                 Stanovsky, Gabriel},
    editor    = {Habernal, Ivan and
                 Schulam, Peter and
                 Tiedemann, J{\"o}rg},
    booktitle = {Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing: System Demonstrations},
    month     = nov,
    year      = {2025},
    address   = {Suzhou, China},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.emnlp-demos.19/},
    pages     = {254--263},
    isbn      = {979-8-89176-334-0},
    abstract  = {Evaluating LLMs with a single prompt has proven unreliable, with small changes leading to significant performance differences. However, generating the prompt variations needed for a more robust multi-prompt evaluation is challenging, limiting its adoption in practice. To address this, we introduce PromptSuite, a framework that enables the automatic generation of various prompts. PromptSuite is flexible {--} working out of the box on a wide range of tasks and benchmarks. It follows a modular prompt design, allowing controlled perturbations to each component, and is extensible, supporting the addition of new components and perturbation types. Through a series of case studies, we show that PromptSuite provides meaningful variations to support strong evaluation practices. All resources, including the Python API, source code, user-friendly web interface, and demonstration video, are available at: https://eliyahabba.github.io/PromptSuite/.}
}
Markdown (Informal)
[PromptSuite: A Task-Agnostic Framework for Multi-Prompt Generation](https://aclanthology.org/2025.emnlp-demos.19/) (Habba et al., EMNLP 2025)
ACL