@inproceedings{bhardwaj-etal-2021-knowledge,
title = "Knowledge Distillation with Noisy Labels for Natural Language Understanding",
author = "Bhardwaj, Shivendra and
Ghaddar, Abbas and
Rashid, Ahmad and
Bibi, Khalil and
Li, Chengyang and
Ghodsi, Ali and
Langlais, Phillippe and
Rezagholizadeh, Mehdi",
booktitle = "Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021)",
month = nov,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.wnut-1.33",
doi = "10.18653/v1/2021.wnut-1.33",
pages = "297--303",
abstract = "Knowledge Distillation (KD) is extensively used to compress and deploy large pre-trained language models on edge devices for real-world applications. However, one neglected area of research is the impact of noisy (corrupted) labels on KD. We present, to the best of our knowledge, the first study on KD with noisy labels in Natural Language Understanding (NLU). We document the scope of the problem and present two methods to mitigate the impact of label noise. Experiments on the GLUE benchmark show that our methods are effective even under high noise levels. Nevertheless, our results indicate that more research is necessary to cope with label noise under the KD.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="bhardwaj-etal-2021-knowledge">
<titleInfo>
<title>Knowledge Distillation with Noisy Labels for Natural Language Understanding</title>
</titleInfo>
<name type="personal">
<namePart type="given">Shivendra</namePart>
<namePart type="family">Bhardwaj</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Abbas</namePart>
<namePart type="family">Ghaddar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ahmad</namePart>
<namePart type="family">Rashid</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Khalil</namePart>
<namePart type="family">Bibi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chengyang</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ali</namePart>
<namePart type="family">Ghodsi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Phillippe</namePart>
<namePart type="family">Langlais</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mehdi</namePart>
<namePart type="family">Rezagholizadeh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Knowledge Distillation (KD) is extensively used to compress and deploy large pre-trained language models on edge devices for real-world applications. However, one neglected area of research is the impact of noisy (corrupted) labels on KD. We present, to the best of our knowledge, the first study on KD with noisy labels in Natural Language Understanding (NLU). We document the scope of the problem and present two methods to mitigate the impact of label noise. Experiments on the GLUE benchmark show that our methods are effective even under high noise levels. Nevertheless, our results indicate that more research is necessary to cope with label noise under the KD setting.</abstract>
<identifier type="citekey">bhardwaj-etal-2021-knowledge</identifier>
<identifier type="doi">10.18653/v1/2021.wnut-1.33</identifier>
<location>
<url>https://aclanthology.org/2021.wnut-1.33</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>297</start>
<end>303</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Knowledge Distillation with Noisy Labels for Natural Language Understanding
%A Bhardwaj, Shivendra
%A Ghaddar, Abbas
%A Rashid, Ahmad
%A Bibi, Khalil
%A Li, Chengyang
%A Ghodsi, Ali
%A Langlais, Phillippe
%A Rezagholizadeh, Mehdi
%S Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021)
%D 2021
%8 nov
%I Association for Computational Linguistics
%C Online
%F bhardwaj-etal-2021-knowledge
%X Knowledge Distillation (KD) is extensively used to compress and deploy large pre-trained language models on edge devices for real-world applications. However, one neglected area of research is the impact of noisy (corrupted) labels on KD. We present, to the best of our knowledge, the first study on KD with noisy labels in Natural Language Understanding (NLU). We document the scope of the problem and present two methods to mitigate the impact of label noise. Experiments on the GLUE benchmark show that our methods are effective even under high noise levels. Nevertheless, our results indicate that more research is necessary to cope with label noise under the KD setting.
%R 10.18653/v1/2021.wnut-1.33
%U https://aclanthology.org/2021.wnut-1.33
%U https://doi.org/10.18653/v1/2021.wnut-1.33
%P 297-303
Markdown (Informal)
[Knowledge Distillation with Noisy Labels for Natural Language Understanding](https://aclanthology.org/2021.wnut-1.33) (Bhardwaj et al., WNUT 2021)
ACL
Shivendra Bhardwaj, Abbas Ghaddar, Ahmad Rashid, Khalil Bibi, Chengyang Li, Ali Ghodsi, Phillippe Langlais, and Mehdi Rezagholizadeh. 2021. Knowledge Distillation with Noisy Labels for Natural Language Understanding. In Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021), pages 297–303, Online. Association for Computational Linguistics.
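For orientation, below is a minimal sketch of the standard knowledge-distillation objective the abstract refers to: a soft-target term from the teacher combined with a hard-target term over the (possibly noisy) labels. This is a generic Hinton-style KD loss, not the paper's two noise-mitigation methods; the function name kd_loss and the hyperparameters T (temperature) and alpha (mixing weight) are illustrative assumptions.

```python
# Generic knowledge-distillation loss (sketch only; not the paper's method).
# Assumed hyperparameters: temperature T and mixing weight alpha.
import torch
import torch.nn.functional as F

def kd_loss(student_logits, teacher_logits, labels, T=2.0, alpha=0.5):
    # Soft-target term: KL divergence between temperature-scaled student and
    # teacher distributions, scaled by T^2 as is conventional.
    soft = F.kl_div(
        F.log_softmax(student_logits / T, dim=-1),
        F.softmax(teacher_logits / T, dim=-1),
        reduction="batchmean",
    ) * (T * T)
    # Hard-target term: cross-entropy against the labels; this is the term
    # that label noise corrupts in the setting the paper studies.
    hard = F.cross_entropy(student_logits, labels)
    return alpha * soft + (1.0 - alpha) * hard
```

In a typical setup, teacher_logits come from a frozen fine-tuned teacher and the loss is backpropagated only through the student.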