@inproceedings{xu-etal-2022-zeroprompt,
    title = "{Z}ero{P}rompt: Scaling Prompt-Based Pretraining to 1,000 Tasks Improves Zero-Shot Generalization",
    author = "Xu, Hanwei  and
      Chen, Yujun  and
      Du, Yulun  and
      Shao, Nan  and
      Wang, Yanggang  and
      Li, Haiyu  and
      Yang, Zhilin",
    editor = "Goldberg, Yoav  and
      Kozareva, Zornitsa  and
      Zhang, Yue",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-emnlp.312/",
    doi = "10.18653/v1/2022.findings-emnlp.312",
    pages = "4235--4252"
}