@inproceedings{yang-etal-2023-parameter,
  title     = {Parameter-Efficient Tuning with Special Token Adaptation},
  author    = {Yang, Xiaocong and
               Huang, James Y. and
               Zhou, Wenxuan and
               Chen, Muhao},
  editor    = {Vlachos, Andreas and
               Augenstein, Isabelle},
  booktitle = {Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics},
  month     = may,
  year      = {2023},
  address   = {Dubrovnik, Croatia},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.eacl-main.60/},
  doi       = {10.18653/v1/2023.eacl-main.60},
  pages     = {865--872},
  abstract  = {Parameter-efficient tuning aims at updating only a small subset of parameters when adapting a pretrained model to downstream tasks. In this work, we introduce PASTA, in which we only modify the special token representations (e.g., [SEP] and [CLS] in BERT) before the self-attention module at each layer in Transformer-based models. PASTA achieves comparable performance to fine-tuning in natural language understanding tasks including text classification and NER with up to only 0.029{\%} of total parameters trained. Our work not only provides a simple yet effective way of parameter-efficient tuning, which has a wide range of practical applications when deploying finetuned models for multiple tasks, but also demonstrates the pivotal role of special tokens in pretrained language models.},
}
Markdown (Informal)
[Parameter-Efficient Tuning with Special Token Adaptation](https://aclanthology.org/2023.eacl-main.60/) (Yang et al., EACL 2023)
ACL
- Xiaocong Yang, James Y. Huang, Wenxuan Zhou, and Muhao Chen. 2023. Parameter-Efficient Tuning with Special Token Adaptation. In Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, pages 865–872, Dubrovnik, Croatia. Association for Computational Linguistics.