@inproceedings{zhan-etal-2024-unlocking,
title = "Unlocking Black-Box Prompt Tuning Efficiency via Zeroth-Order Optimization",
author = "Zhan, Heshen and
Chen, Congliang and
Ding, Tian and
Li, Ziniu and
Sun, Ruoyu",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.findings-emnlp.871/",
doi = "10.18653/v1/2024.findings-emnlp.871",
pages = "14825--14838",
abstract = "Prompt optimization emerges as an important technique for adapting Large Language Models (LLMs) to specific tasks. Unfortunately, LLM proprietors often limit access to models' internal weights, confining users to inference API services. This restriction poses a significant challenge for prompt optimization, as conventional optimization-based algorithms rely heavily on gradient information, which is unavailable via inference APIs. Addressing this challenge, this paper presents the Zeroth-Order Tuning (ZOT) approach, which enables efficient prompt tuning solely via inference APIs. ZOT adopts the zeroth-order optimization framework, utilizing finite differences to approximate gradient information. We further incorporate ZOT with gradient clipping and momentum techniques to enhance the tuning effectiveness. Experimental results show that ZOT outperforms existing black-box prompt tuning methods in terms of both task-specific performance and convergence speed. Furthermore, we provide a theoretical explanation for the unexpectedly strong performance of zeroth-order methods on LLM prompt tuning. By introducing the concept of effective dimension, we establish a strong connection between the inherently low effective dimension of prompt spaces and the superior convergence speed of zeroth-order methods. Our code is available at https://github.com/ZhanHeshen/ZOT."
}
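
The abstract describes approximating gradients with finite differences and stabilizing the updates with gradient clipping and momentum. As an illustration only (not the authors' released implementation, which is at the GitHub link above), a minimal sketch of such a zeroth-order tuning loop might look like the following; the names `zo_gradient`, `zo_tune`, `mu`, and `clip_norm` are assumptions, and `loss_fn` is assumed to evaluate the task loss purely through an inference API on a flat vector of continuous prompt parameters.

```python
import numpy as np

def zo_gradient(loss_fn, prompt, mu=1e-3, num_samples=1):
    """Two-point finite-difference gradient estimate at `prompt`
    using random Gaussian perturbation directions."""
    grad = np.zeros_like(prompt)
    for _ in range(num_samples):
        u = np.random.randn(*prompt.shape)
        # Directional derivative estimate: (L(p + mu*u) - L(p - mu*u)) / (2*mu)
        delta = (loss_fn(prompt + mu * u) - loss_fn(prompt - mu * u)) / (2.0 * mu)
        grad += delta * u
    return grad / num_samples

def zo_tune(loss_fn, prompt, steps=100, lr=1e-2, mu=1e-3,
            momentum=0.9, clip_norm=1.0):
    """Zeroth-order prompt tuning loop with gradient clipping and momentum.
    Only forward evaluations of `loss_fn` are needed, so no access to
    model weights or backpropagation is assumed."""
    velocity = np.zeros_like(prompt)
    for _ in range(steps):
        grad = zo_gradient(loss_fn, prompt, mu=mu)
        # Clip the (noisy) estimated gradient to stabilize updates.
        norm = np.linalg.norm(grad)
        if norm > clip_norm:
            grad *= clip_norm / norm
        # Heavy-ball momentum update on the prompt parameters.
        velocity = momentum * velocity + grad
        prompt = prompt - lr * velocity
    return prompt
```

Increasing `num_samples` averages more perturbation directions per step, trading extra API queries for a lower-variance gradient estimate.
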