@inproceedings{prasanna-etal-2020-bert,
title = "{W}hen {BERT} {P}lays the {L}ottery, {A}ll {T}ickets {A}re {W}inning",
author = "Prasanna, Sai and
Rogers, Anna and
Rumshisky, Anna",
editor = "Webber, Bonnie and
Cohn, Trevor and
He, Yulan and
Liu, Yang",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.emnlp-main.259/",
doi = "10.18653/v1/2020.emnlp-main.259",
pages = "3208--3229",
abstract = "Large Transformer-based models were shown to be reducible to a smaller number of self-attention heads and layers. We consider this phenomenon from the perspective of the lottery ticket hypothesis, using both structured and magnitude pruning. For fine-tuned BERT, we show that (a) it is possible to find subnetworks achieving performance that is comparable with that of the full model, and (b) similarly-sized subnetworks sampled from the rest of the model perform worse. Strikingly, with structured pruning even the worst possible subnetworks remain highly trainable, indicating that most pre-trained BERT weights are potentially useful. We also study the {\textquotedblleft}good{\textquotedblright} subnetworks to see if their success can be attributed to superior linguistic knowledge, but find them unstable, and not explained by meaningful self-attention patterns."
}
Markdown (Informal)
[When BERT Plays the Lottery, All Tickets Are Winning](https://aclanthology.org/2020.emnlp-main.259/) (Prasanna et al., EMNLP 2020)
ACL
Sai Prasanna, Anna Rogers, and Anna Rumshisky. 2020. When BERT Plays the Lottery, All Tickets Are Winning. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 3208–3229, Online. Association for Computational Linguistics.