@inproceedings{ye-etal-2021-crossfit,
title = "{C}ross{F}it: A Few-shot Learning Challenge for Cross-task Generalization in {NLP}",
author = "Ye, Qinyuan and
Lin, Bill Yuchen and
Ren, Xiang",
editor = "Moens, Marie-Francine and
Huang, Xuanjing and
Specia, Lucia and
Yih, Scott Wen-tau",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2021.emnlp-main.572/",
doi = "10.18653/v1/2021.emnlp-main.572",
pages = "7163--7189",
abstract = "Humans can learn a new language task efficiently with only few examples, by leveraging their knowledge obtained when learning prior tasks. In this paper, we explore whether and how such cross-task generalization ability can be acquired, and further applied to build better few-shot learners across diverse NLP tasks. We introduce CrossFit, a problem setup for studying cross-task generalization ability, which standardizes seen/unseen task partitions, data access during different learning stages, and the evaluation protocols. To instantiate different seen/unseen task partitions in CrossFit and facilitate in-depth analysis, we present the NLP Few-shot Gym, a repository of 160 diverse few-shot NLP tasks created from open-access NLP datasets and converted to a unified text-to-text format. Our analysis reveals that the few-shot learning ability on unseen tasks can be improved via an upstream learning stage using a set of seen tasks. We also observe that the selection of upstream learning tasks can significantly influence few-shot performance on unseen tasks, asking further analysis on task similarity and transferability."
}
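
For context, the "unified text-to-text format" mentioned in the abstract means each task instance is reduced to a plain input string paired with a target output string, so that one sequence-to-sequence model can handle all 160 tasks. Below is a minimal hypothetical sketch of such a conversion for a QA example; the function name, field names, and prompt layout are illustrative assumptions, not the paper's released data schema.

```python
# Hypothetical sketch of a unified text-to-text task instance, in the
# spirit of the NLP Few-shot Gym described in the abstract above.
# The schema and prompt layout here are assumptions for illustration only.

def to_text_to_text(question: str, context: str, answer: str) -> dict:
    """Flatten a QA example into a single (input, output) string pair."""
    return {
        "input": f"question: {question} context: {context}",
        "output": answer,
    }

example = to_text_to_text(
    question="Who introduced CrossFit?",
    context="CrossFit was introduced by Ye, Lin, and Ren (EMNLP 2021).",
    answer="Ye, Lin, and Ren",
)
print(example["input"])   # question: ... context: ...
print(example["output"])  # Ye, Lin, and Ren
```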