@article{schick-schutze-2022-true,
  author    = {Schick, Timo and
               Sch{\"u}tze, Hinrich},
  editor    = {Roark, Brian and
               Nenkova, Ani},
  title     = {True Few-Shot Learning with {Prompts}---{A} Real-World Perspective},
  journal   = {Transactions of the Association for Computational Linguistics},
  volume    = {10},
  pages     = {716--731},
  year      = {2022},
  address   = {Cambridge, MA},
  publisher = {MIT Press},
  doi       = {10.1162/tacl_a_00485},
  url       = {https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.tacl-1.41/},
  abstract  = {Prompt-based approaches excel at few-shot learning. However, Perez et al. (2021) recently cast doubt on their performance as they had difficulty getting good results in a ``true'' few-shot setting in which prompts and hyperparameters cannot be tuned on a dev set. In view of this, we conduct an extensive study of Pet, a method that combines textual instructions with example-based finetuning. We show that, if correctly configured, Pet performs strongly in true few-shot settings without a dev set. Crucial for this strong performance is a number of design choices, including Pet's ability to intelligently handle multiple prompts. We put our findings to a real-world test by running Pet on RAFT, a benchmark of tasks taken from realistic NLP applications for which no labeled dev or test sets are available. Pet achieves a new state of the art on RAFT and performs close to non-expert humans for 7 out of 11 tasks. These results demonstrate that prompt-based learners can successfully be applied in true few-shot settings and underpin our belief that learning from instructions will play an important role on the path towards human-like few-shot learning capabilities.},
}
@comment{
Markdown (Informal):
[True Few-Shot Learning with Prompts—A Real-World Perspective](https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.tacl-1.41/) (Schick & Schütze, TACL 2022)
ACL
}