@inproceedings{khetan-karnin-2020-schubert,
title = "schu{BERT}: Optimizing Elements of {BERT}",
author = "Khetan, Ashish and
Karnin, Zohar",
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.acl-main.250",
doi = "10.18653/v1/2020.acl-main.250",
pages = "2807--2818",
abstract = "Transformers have gradually become a key component for many state-of-the-art natural language representation models. A recent Transformer based model- BERTachieved state-of-the-art results on various natural language processing tasks, including GLUE, SQuAD v1.1, and SQuAD v2.0. This model however is computationally prohibitive and has a huge number of parameters. In this work we revisit the architecture choices of BERT in efforts to obtain a lighter model. We focus on reducing the number of parameters yet our methods can be applied towards other objectives such FLOPs or latency. We show that much efficient light BERT models can be obtained by reducing algorithmically chosen correct architecture design dimensions rather than reducing the number of Transformer encoder layers. In particular, our schuBERT gives 6.6{\%} higher average accuracy on GLUE and SQuAD datasets as compared to BERT with three encoder layers while having the same number of parameters.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="khetan-karnin-2020-schubert">
<titleInfo>
<title>schuBERT: Optimizing Elements of BERT</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ashish</namePart>
<namePart type="family">Khetan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zohar</namePart>
<namePart type="family">Karnin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-jul</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Transformers have gradually become a key component for many state-of-the-art natural language representation models. A recent Transformer-based model, BERT, achieved state-of-the-art results on various natural language processing tasks, including GLUE, SQuAD v1.1, and SQuAD v2.0. This model, however, is computationally prohibitive and has a huge number of parameters. In this work we revisit the architecture choices of BERT in an effort to obtain a lighter model. We focus on reducing the number of parameters, yet our methods can be applied towards other objectives such as FLOPs or latency. We show that much more efficient light BERT models can be obtained by reducing algorithmically chosen correct architecture design dimensions rather than reducing the number of Transformer encoder layers. In particular, our schuBERT gives 6.6% higher average accuracy on GLUE and SQuAD datasets as compared to BERT with three encoder layers while having the same number of parameters.</abstract>
<identifier type="citekey">khetan-karnin-2020-schubert</identifier>
<identifier type="doi">10.18653/v1/2020.acl-main.250</identifier>
<location>
<url>https://aclanthology.org/2020.acl-main.250</url>
</location>
<part>
<date>2020-jul</date>
<extent unit="page">
<start>2807</start>
<end>2818</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T schuBERT: Optimizing Elements of BERT
%A Khetan, Ashish
%A Karnin, Zohar
%S Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics
%D 2020
%8 jul
%I Association for Computational Linguistics
%C Online
%F khetan-karnin-2020-schubert
%X Transformers have gradually become a key component for many state-of-the-art natural language representation models. A recent Transformer-based model, BERT, achieved state-of-the-art results on various natural language processing tasks, including GLUE, SQuAD v1.1, and SQuAD v2.0. This model, however, is computationally prohibitive and has a huge number of parameters. In this work we revisit the architecture choices of BERT in an effort to obtain a lighter model. We focus on reducing the number of parameters, yet our methods can be applied towards other objectives such as FLOPs or latency. We show that much more efficient light BERT models can be obtained by reducing algorithmically chosen correct architecture design dimensions rather than reducing the number of Transformer encoder layers. In particular, our schuBERT gives 6.6% higher average accuracy on GLUE and SQuAD datasets as compared to BERT with three encoder layers while having the same number of parameters.
%R 10.18653/v1/2020.acl-main.250
%U https://aclanthology.org/2020.acl-main.250
%U https://doi.org/10.18653/v1/2020.acl-main.250
%P 2807-2818
Markdown (Informal)
[schuBERT: Optimizing Elements of BERT](https://aclanthology.org/2020.acl-main.250) (Khetan & Karnin, ACL 2020)
ACL
Ashish Khetan and Zohar Karnin. 2020. schuBERT: Optimizing Elements of BERT. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 2807–2818, Online. Association for Computational Linguistics.