@inproceedings{dasgupta-etal-2023-cost,
title = "Cost-effective Distillation of Large Language Models",
author = "Dasgupta, Sayantan and
Cohn, Trevor and
Baldwin, Timothy",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.findings-acl.463/",
doi = "10.18653/v1/2023.findings-acl.463",
pages = "7346--7354",
abstract = "Knowledge distillation (KD) involves training a small {\textquotedblleft}student{\textquotedblright} model to replicate the strong performance of a high-capacity {\textquotedblleft}teacher{\textquotedblright} model, enabling efficient deployment in resource-constrained settings. Top-performing methods tend to be task- or architecture-specific and lack generalizability. Several existing approaches require pretraining of the teacher on task-specific datasets, which can be costly for large and unstable for small datasets. Here we propose an approach for improving KD through a novel distillation loss agnostic to the task and model architecture. We successfully apply our method to the distillation of the BERT-base and achieve highly competitive results from the distilled student across a range of GLUE tasks, especially for tasks with smaller datasets."
}
Markdown (Informal)
[Cost-effective Distillation of Large Language Models](https://aclanthology.org/2023.findings-acl.463/) (Dasgupta et al., Findings 2023)
ACL
Sayantan Dasgupta, Trevor Cohn, and Timothy Baldwin. 2023. Cost-effective Distillation of Large Language Models. In Findings of the Association for Computational Linguistics: ACL 2023, pages 7346–7354, Toronto, Canada. Association for Computational Linguistics.