@inproceedings{liu-etal-2023-enhancing-scalability,
title = "Enhancing Scalability of Pre-trained Language Models via Efficient Parameter Sharing",
author = "Liu, Peiyu and
Gao, Ze-Feng and
Chen, Yushuo and
Zhao, Xin and
Wen, Ji-Rong",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.findings-emnlp.920/",
doi = "10.18653/v1/2023.findings-emnlp.920",
pages = "13771--13785",
abstract = "In this paper, we propose a highly parameter-efficient approach to scaling pre-trained language models (PLMs) to a deeper model depth. Unlike prior work that shares all parameters or uses extra blocks, we design a more capable parameter-sharing architecture based on matrix product operator (MPO), an efficient tensor decomposition method to factorize the parameter matrix into a set of local tensors. Based on such a decomposition, we share the important local tensor across all layers for reducing the model size and meanwhile keep layer-specific tensors (also using Adapters) for enhancing the adaptation flexibility. To improve the model training, we further propose a stable initialization algorithm tailored for the MPO-based architecture. Extensive experiments have demonstrated the effectiveness of our proposed model in enhancing scalability and achieving higher performance (i.e., with fewer parameters than BERT-base, we successfully scale the model depth by a factor of 4x and even achieve 0.1 points higher than BERT-large for GLUE score). The code to reproduce the results of this paper can be found at https://github.com/RUCAIBox/MPOBERT-code."
}
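The abstract describes factorizing each weight matrix with a matrix product operator (MPO) and sharing the important (central) local tensor across layers. Below is a minimal NumPy sketch of that idea, not the authors' implementation (their code is at https://github.com/RUCAIBox/MPOBERT-code): the function names `mpo_decompose` and `mpo_reconstruct`, the mode sizes, and the rank cap are illustrative assumptions. It only shows how a matrix breaks into a chain of local tensors via sequential truncated SVDs and which core would be the natural candidate for cross-layer sharing.

```python
import numpy as np


def mpo_decompose(W, in_modes, out_modes, max_rank):
    """Factorize W (prod(in_modes) x prod(out_modes)) into a chain of local
    tensors of shape (r_{k-1}, i_k, j_k, r_k) via sequential truncated SVDs
    (a tensor-train / MPO factorization)."""
    n = len(in_modes)
    # Reshape to (i1..in, j1..jn), then interleave modes -> (i1, j1, ..., in, jn).
    T = W.reshape(*in_modes, *out_modes)
    T = T.transpose([ax for k in range(n) for ax in (k, n + k)])
    cores, r_prev = [], 1
    C = T.reshape(r_prev * in_modes[0] * out_modes[0], -1)
    for k in range(n - 1):
        U, S, Vt = np.linalg.svd(C, full_matrices=False)
        r = min(max_rank, S.size)  # rank truncation
        cores.append(U[:, :r].reshape(r_prev, in_modes[k], out_modes[k], r))
        C = (S[:r, None] * Vt[:r]).reshape(r * in_modes[k + 1] * out_modes[k + 1], -1)
        r_prev = r
    cores.append(C.reshape(r_prev, in_modes[-1], out_modes[-1], 1))
    return cores


def mpo_reconstruct(cores):
    """Contract the local tensors back into a full weight matrix."""
    T = cores[0]
    for core in cores[1:]:
        T = np.tensordot(T, core, axes=([-1], [0]))  # contract over bond dims
    T = T.squeeze(axis=0).squeeze(axis=-1)           # drop boundary bonds of size 1
    n = T.ndim // 2
    in_modes, out_modes = T.shape[0::2], T.shape[1::2]
    T = T.transpose(list(range(0, 2 * n, 2)) + list(range(1, 2 * n, 2)))
    return T.reshape(int(np.prod(in_modes)), int(np.prod(out_modes)))


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    # Illustrative mode split of a 768 x 768 (BERT-base-sized) weight matrix.
    in_modes, out_modes = (4, 8, 4, 6), (4, 8, 4, 6)
    W = rng.standard_normal((768, 768)) / np.sqrt(768)

    cores = mpo_decompose(W, in_modes, out_modes, max_rank=64)
    # The largest core stands in for the "important local tensor" that the
    # paper shares across layers; the small outer cores stay layer-specific.
    shared = max(cores, key=lambda c: c.size)
    print("core shapes:", [c.shape for c in cores])
    print("candidate shared tensor:", shared.shape)

    err = np.linalg.norm(W - mpo_reconstruct(cores)) / np.linalg.norm(W)
    print(f"relative reconstruction error at rank 64: {err:.3f}")
```

The truncated reconstruction is lossy for a random matrix; the sketch is only meant to show the decomposition mechanics and where the shared versus layer-specific tensors sit.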