@inproceedings{kwon-etal-2022-alphatuning,
  title     = {{AlphaTuning}: Quantization-Aware Parameter-Efficient Adaptation of Large-Scale Pre-Trained Language Models},
  author    = {Kwon, Se Jung and Kim, Jeonghoon and Bae, Jeongin and Yoo, Kang Min and Kim, Jin-Hwa and Park, Baeseong and Kim, Byeongwook and Ha, Jung-Woo and Sung, Nako and Lee, Dongsoo},
  editor    = {Goldberg, Yoav and Kozareva, Zornitsa and Zhang, Yue},
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2022},
  month     = dec,
  year      = {2022},
  address   = {Abu Dhabi, United Arab Emirates},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2022.findings-emnlp.240/},
  doi       = {10.18653/v1/2022.findings-emnlp.240},
  pages     = {3288--3305},
}