@inproceedings{zhang-etal-2022-making,
title = "Making Pretrained Language Models Good Long-tailed Learners",
author = "Zhang, Chen and
Ren, Lei and
Wang, Jingang and
Wu, Wei and
Song, Dawei",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.emnlp-main.217/",
doi = "10.18653/v1/2022.emnlp-main.217",
pages = "3298--3312",
abstract = "Prompt-tuning has shown appealing performance in few-shot classification by virtue of its capability in effectively exploiting pre-trained knowledge. This motivates us to check the hypothesis that prompt-tuning is also a promising choice for long-tailed classification, since the tail classes are intuitively few-shot ones. To achieve this aim, we conduct empirical studies to examine the hypothesis. The results demonstrate that prompt-tuning makes pretrained language models at least good long-tailed learners. For intuitions on why prompt-tuning can achieve good performance in long-tailed classification, we carry out in-depth analyses by progressively bridging the gap between prompt-tuning and commonly used finetuning. The summary is that the classifier structure and parameterization form the key to making good long-tailed learners, in comparison with the less important input structure. Finally, we verify the applicability of our finding to few-shot classification."
}