@inproceedings{anderson-gomez-rodriguez-2020-distilling,
title = "Distilling Neural Networks for Greener and Faster Dependency Parsing",
author = "Anderson, Mark and
G{\'o}mez-Rodr{\'\i}guez, Carlos",
editor = "Bouma, Gosse and
Matsumoto, Yuji and
Oepen, Stephan and
Sagae, Kenji and
Seddah, Djam{\'e} and
Sun, Weiwei and
S{\o}gaard, Anders and
Tsarfaty, Reut and
Zeman, Dan",
booktitle = "Proceedings of the 16th International Conference on Parsing Technologies and the IWPT 2020 Shared Task on Parsing into Enhanced Universal Dependencies",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.iwpt-1.2",
doi = "10.18653/v1/2020.iwpt-1.2",
pages = "2--13",
abstract = "The carbon footprint of natural language processing research has been increasing in recent years due to its reliance on large and inefficient neural network implementations. Distillation is a network compression technique which attempts to impart knowledge from a large model to a smaller one. We use teacher-student distillation to improve the efficiency of the Biaffine dependency parser which obtains state-of-the-art performance with respect to accuracy and parsing speed (Dozat and Manning, 2017). When distilling to 20{\%} of the original model{'}s trainable parameters, we only observe an average decrease of ∼1 point for both UAS and LAS across a number of diverse Universal Dependency treebanks while being 2.30x (1.19x) faster than the baseline model on CPU (GPU) at inference time. We also observe a small increase in performance when compressing to 80{\%} for some treebanks. Finally, through distillation we attain a parser which is not only faster but also more accurate than the fastest modern parser on the Penn Treebank.",
}
Markdown (Informal)
[Distilling Neural Networks for Greener and Faster Dependency Parsing](https://aclanthology.org/2020.iwpt-1.2) (Anderson & Gómez-Rodríguez, IWPT 2020)
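The abstract describes teacher-student distillation, in which a small student parser is trained to imitate a large teacher. Below is a minimal sketch of a generic Hinton-style distillation objective for head selection, assuming NumPy; the function name `distillation_loss`, the `alpha` weighting, and the `temperature` parameter are illustrative assumptions, not the paper's exact objective, which may differ in loss terms and weighting.

```python
# Minimal sketch of teacher-student distillation for an arc (head-selection)
# scorer, assuming NumPy only. The loss mixes (i) cross-entropy against gold
# heads and (ii) cross-entropy against the teacher's softened head
# distribution. All names and hyperparameters here are illustrative.
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)  # numerical stability
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def distillation_loss(student_logits, teacher_logits, gold_heads,
                      alpha=0.5, temperature=1.0):
    """student_logits, teacher_logits: (n_tokens, n_candidates) arc scores.
    gold_heads: (n_tokens,) index of the gold head for each token.
    alpha balances gold supervision against imitation of the teacher."""
    n = student_logits.shape[0]
    log_p_student = np.log(softmax(student_logits / temperature) + 1e-12)
    p_teacher = softmax(teacher_logits / temperature)

    # hard loss: negative log-likelihood of the gold head under the student
    hard = -log_p_student[np.arange(n), gold_heads].mean()
    # soft loss: cross-entropy between teacher and student head distributions
    soft = -(p_teacher * log_p_student).sum(axis=-1).mean()
    return alpha * hard + (1.0 - alpha) * soft

# toy usage: 5 tokens, 6 candidate heads (including ROOT)
rng = np.random.default_rng(0)
student = rng.normal(size=(5, 6))
teacher = rng.normal(size=(5, 6))
gold = rng.integers(0, 6, size=5)
print(distillation_loss(student, teacher, gold))
```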