@inproceedings{liu-etal-2022-late,
title = "Late Prompt Tuning: A Late Prompt Could Be Better Than Many Prompts",
author = "Liu, Xiangyang and
Sun, Tianxiang and
Huang, Xuanjing and
Qiu, Xipeng",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.findings-emnlp.95/",
doi = "10.18653/v1/2022.findings-emnlp.95",
pages = "1325--1338",
abstract = "Prompt tuning is a parameter-efficient tuning (PETuning) method for utilizing pre-trained models (PTMs) that simply prepends a soft prompt to the input and only optimizes the prompt to adapt PTMs to downstream tasks. Although it is parameter- and deployment-efficient, its performance still lags behind other state-of-the-art PETuning methods. Besides, the training cost of prompt tuning is not significantly reduced due to the back-propagation through the entire model. Through empirical analyses, we shed some light on the lagging performance of prompt tuning and recognize a trade-off between the propagation distance from label signals to the inserted prompt and the influence of the prompt on model outputs. Further, we present Late Prompt Tuning (LPT) that inserts a late prompt into an intermediate layer of the PTM instead of the input layer or all layers. The late prompt is obtained by a neural prompt generator conditioned on the hidden states before the prompt insertion layer and therefore is instance-dependent. Through extensive experimental results across various tasks and PTMs, we show that LPT can achieve competitive performance to full model tuning and other PETuning methods under both full-data and few-shot scenarios while possessing faster training speed and lower memory cost."
}
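
Below is a minimal, illustrative sketch of the mechanism the abstract describes: an instance-dependent soft prompt, produced by a small prompt generator from the hidden states just before an intermediate layer, is prepended to those hidden states, with the pre-trained model kept frozen. The class names, the insertion layer index, the prompt length, and all dimensions are hypothetical placeholders chosen for this sketch, not the authors' implementation.

```python
# Sketch of Late Prompt Tuning (LPT) as summarized in the abstract.
# Hypothetical names and sizes; not the authors' code.
import torch
import torch.nn as nn


class PromptGenerator(nn.Module):
    """Small bottleneck MLP mapping pooled hidden states to prompt vectors."""

    def __init__(self, hidden_size: int, prompt_len: int, bottleneck: int = 64):
        super().__init__()
        self.prompt_len = prompt_len
        self.net = nn.Sequential(
            nn.Linear(hidden_size, bottleneck),
            nn.Tanh(),
            nn.Linear(bottleneck, prompt_len * hidden_size),
        )

    def forward(self, hidden: torch.Tensor) -> torch.Tensor:
        # hidden: (batch, seq_len, hidden_size); pool over tokens so the
        # prompt depends on the individual input instance.
        pooled = hidden.mean(dim=1)
        prompt = self.net(pooled)  # (batch, prompt_len * hidden_size)
        return prompt.view(-1, self.prompt_len, hidden.size(-1))


class LatePromptModel(nn.Module):
    """Frozen encoder layers with a generated prompt inserted at `insert_at`."""

    def __init__(self, layers: nn.ModuleList, insert_at: int,
                 hidden_size: int, prompt_len: int = 10):
        super().__init__()
        self.layers = layers
        self.insert_at = insert_at
        self.generator = PromptGenerator(hidden_size, prompt_len)
        # Only the prompt generator is trained; the pre-trained layers stay frozen.
        for p in self.layers.parameters():
            p.requires_grad_(False)

    def forward(self, hidden: torch.Tensor) -> torch.Tensor:
        for i, layer in enumerate(self.layers):
            if i == self.insert_at:
                # Generate the late prompt from the hidden states before the
                # insertion layer and prepend it to the sequence.
                prompt = self.generator(hidden)
                hidden = torch.cat([prompt, hidden], dim=1)
            hidden = layer(hidden)
        return hidden


# Toy usage with made-up dimensions (LPT itself uses a pre-trained PTM).
hidden_size, num_layers = 32, 6
layers = nn.ModuleList([
    nn.TransformerEncoderLayer(d_model=hidden_size, nhead=4, batch_first=True)
    for _ in range(num_layers)
])
model = LatePromptModel(layers, insert_at=num_layers // 2, hidden_size=hidden_size)
out = model(torch.randn(2, 16, hidden_size))  # (2, 10 + 16, 32)
```

Because the prompt is injected halfway through the stack, gradients for the trainable generator only need to flow back to the insertion layer, which is consistent with the faster training and lower memory cost reported in the abstract.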