@inproceedings{zhou-etal-2023-bridging,
title = "Bridging the Gap between Decision and Logits in Decision-based Knowledge Distillation for Pre-trained Language Models",
author = "Zhou, Qinhong and
Yang, Zonghan and
Li, Peng and
Liu, Yang",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.acl-long.738/",
doi = "10.18653/v1/2023.acl-long.738",
pages = "13234--13248",
abstract = "Conventional knowledge distillation (KD) methods require access to the internal information of teachers, e.g., logits. However, such information may not always be accessible for large pre-trained language models (PLMs). In this work, we focus on decision-based KD for PLMs, where only teacher decisions (i.e., top-1 labels) are accessible. Considering the information gap between logits and decisions, we propose a novel method to estimate logits from the decision distributions. Specifically, decision distributions can be both derived as a function of logits theoretically and estimated with test-time data augmentation empirically. By combining the theoretical and empirical estimations of the decision distributions together, the estimation of logits can be successfully reduced to a simple root-finding problem. Extensive experiments show that our method significantly outperforms strong baselines on both natural language understanding and machine reading comprehension datasets."
}
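
The abstract describes a two-step recipe: estimate the teacher's decision distribution empirically from top-1 labels on augmented inputs, equate it to a theoretical decision distribution expressed as a function of the logits, and recover the logits by root-finding. The sketch below is a minimal, hypothetical Python illustration of that idea, not the paper's implementation: the Gumbel-noise teacher (under which the decision distribution equals the softmax of the logits, via the Gumbel-max trick), the helper names (`empirical_decision_distribution`, `estimate_logits`, `augment`), and the use of `scipy.optimize.fsolve` are all assumptions made here for concreteness.

```python
# Hypothetical sketch of decision-based logit estimation as described in the
# abstract. The Gumbel-noise decision model is one plausible "theoretical"
# decision distribution, not necessarily the one used in the paper.
import numpy as np
from scipy.optimize import fsolve

num_classes = 4
rng = np.random.default_rng(0)

def empirical_decision_distribution(teacher_top1, x, augment, n_samples=200):
    """Empirical estimate: fraction of augmented copies of x that the
    teacher assigns to each class (only top-1 labels are observed)."""
    counts = np.zeros(num_classes)
    for _ in range(n_samples):
        counts[teacher_top1(augment(x))] += 1
    return counts / n_samples

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

def estimate_logits(p_hat):
    """Solve softmax(z) = p_hat for z, fixing z[0] = 0 to remove the
    shift-invariance of the softmax; fsolve handles the root-finding."""
    def residual(z_rest):
        z = np.concatenate(([0.0], z_rest))
        return (softmax(z) - p_hat)[1:]  # first component is redundant
    z_rest = fsolve(residual, np.zeros(num_classes - 1))
    return np.concatenate(([0.0], z_rest))

# Toy teacher: true logits plus Gumbel noise, so its decision distribution
# is exactly softmax(true_logits) by the Gumbel-max trick.
true_logits = np.array([2.0, 1.0, 0.5, -1.0])
def teacher_top1(x):
    return int(np.argmax(true_logits + rng.gumbel(size=num_classes)))
def augment(x):
    return x  # the Gumbel noise stands in for input augmentation here

p_hat = empirical_decision_distribution(teacher_top1, None, augment)
z_hat = estimate_logits(p_hat)
print("estimated logits (shifted):", z_hat)
print("true logits (shifted):     ", true_logits - true_logits[0])
```

In this toy setup the recovered logits match the true ones up to an additive constant, which is the most any decision-only method can hope for, since the softmax (and hence the decision distribution) is invariant to shifting all logits by the same amount.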
Markdown (Informal)
[Bridging the Gap between Decision and Logits in Decision-based Knowledge Distillation for Pre-trained Language Models](https://aclanthology.org/2023.acl-long.738/) (Zhou et al., ACL 2023)