@inproceedings{wang-etal-2021-minilmv2,
title = "{M}ini{LM}v2: Multi-Head Self-Attention Relation Distillation for Compressing Pretrained Transformers",
author = "Wang, Wenhui and
Bao, Hangbo and
Huang, Shaohan and
Dong, Li and
Wei, Furu",
editor = "Zong, Chengqing and
Xia, Fei and
Li, Wenjie and
Navigli, Roberto",
booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.findings-acl.188/",
doi = "10.18653/v1/2021.findings-acl.188",
pages = "2140--2151"
}