@inproceedings{hahn-choi-2019-self,
title = "Self-Knowledge Distillation in Natural Language Processing",
author = "Hahn, Sangchul and
Choi, Heeyoul",
editor = "Mitkov, Ruslan and
Angelova, Galia",
booktitle = "Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2019)",
month = sep,
year = "2019",
address = "Varna, Bulgaria",
publisher = "INCOMA Ltd.",
url = "https://preview.aclanthology.org/fix-sig-urls/R19-1050/",
doi = "10.26615/978-954-452-056-4_050",
pages = "423--430",
abstract = "Since deep learning became a key player in natural language processing (NLP), many deep learning models have been showing remarkable performances in a variety of NLP tasks. Such high performance can be explained by efficient knowledge representation of deep learning models. Knowledge distillation from pretrained deep networks suggests that we can use more information from the soft target probability to train other neural networks. In this paper, we propose a self-knowledge distillation method, based on the soft target probabilities of the training model itself, where multimode information is distilled from the word embedding space right below the softmax layer. Due to the time complexity, our method approximates the soft target probabilities. In experiments, we applied the proposed method to two different and fundamental NLP tasks: language model and neural machine translation. The experiment results show that our proposed method improves performance on the tasks."
}
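The abstract above describes distilling soft target probabilities drawn from the word embedding space right below the softmax layer. The snippet below is a minimal, hypothetical PyTorch sketch of that general idea, not the authors' exact algorithm or their approximation: it builds soft targets from cosine similarities between the gold word's output embedding and the rest of the vocabulary, then mixes the resulting KL term with ordinary cross-entropy. The names `output_embedding`, `alpha`, and `temperature` are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def embedding_soft_targets(target_ids, output_embedding, temperature=1.0):
    """Build soft target distributions from similarities between the gold
    word's embedding and every word embedding in the vocabulary."""
    # (batch, dim) embeddings of the gold words.
    target_vecs = output_embedding[target_ids]
    # Cosine similarity of each gold word against the whole vocabulary: (batch, vocab).
    sims = F.cosine_similarity(
        target_vecs.unsqueeze(1),       # (batch, 1, dim)
        output_embedding.unsqueeze(0),  # (1, vocab, dim)
        dim=-1,
    )
    return F.softmax(sims / temperature, dim=-1)

def self_distillation_loss(logits, target_ids, output_embedding, alpha=0.5):
    """Combine hard-label cross-entropy with a KL term against
    embedding-derived soft targets."""
    # Standard cross-entropy against the hard (one-hot) targets.
    hard_loss = F.cross_entropy(logits, target_ids)
    # Soft targets built from embedding similarities; no gradient flows into them.
    with torch.no_grad():
        soft_targets = embedding_soft_targets(target_ids, output_embedding)
    # KL divergence between the model's predictive distribution and the soft targets.
    soft_loss = F.kl_div(
        F.log_softmax(logits, dim=-1), soft_targets, reduction="batchmean"
    )
    return (1.0 - alpha) * hard_loss + alpha * soft_loss

# Toy usage: vocabulary of 1,000 words, 64-dimensional embeddings, batch of 8.
vocab_size, embed_dim, batch = 1000, 64, 8
output_embedding = torch.randn(vocab_size, embed_dim)
logits = torch.randn(batch, vocab_size, requires_grad=True)
target_ids = torch.randint(0, vocab_size, (batch,))
loss = self_distillation_loss(logits, target_ids, output_embedding)
loss.backward()
```

In this sketch the (batch, vocab, dim) similarity computation is the expensive part, which is in the spirit of the abstract's remark that the soft target probabilities must be approximated for realistically sized vocabularies.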
[Self-Knowledge Distillation in Natural Language Processing](https://preview.aclanthology.org/fix-sig-urls/R19-1050/) (Hahn & Choi, RANLP 2019)