@inproceedings{kanashiro-pereira-etal-2021-multi,
title = "Multi-Layer Random Perturbation Training for improving Model Generalization Efficiently",
author = "Kanashiro Pereira, Lis and
Taya, Yuki and
Kobayashi, Ichiro",
editor = "Bastings, Jasmijn and
Belinkov, Yonatan and
Dupoux, Emmanuel and
Giulianelli, Mario and
Hupkes, Dieuwke and
Pinter, Yuval and
Sajjad, Hassan",
booktitle = "Proceedings of the Fourth BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.blackboxnlp-1.23/",
doi = "10.18653/v1/2021.blackboxnlp-1.23",
pages = "303--310",
abstract = "We propose a simple yet effective Multi-Layer RAndom Perturbation Training algorithm (RAPT) to enhance model robustness and generalization. The key idea is to apply randomly sampled noise to each input to generate label-preserving artificial input points. To encourage the model to generate more diverse examples, the noise is added to a combination of the model layers. The model then regularizes the posterior difference between clean and noisy inputs. We apply RAPT to robust and efficient BERT training, and conduct comprehensive fine-tuning experiments on GLUE tasks. Our results show that RAPT outperforms both the standard fine-tuning approach and the adversarial training method, while requiring 22{\%} less training time."
}
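
The abstract describes the RAPT procedure: sample noise, inject it at a randomly chosen combination of encoder layers, and regularize the posterior difference between the clean and noisy forward passes. Below is a minimal PyTorch sketch of that idea, not the authors' implementation: the toy MLP encoder, the Gaussian noise scale `noise_eps`, the layer-sampling scheme, and the symmetric-KL weight `alpha` are all illustrative assumptions.

```python
# Minimal sketch of RAPT-style multi-layer random perturbation training.
# Assumptions (not from the paper's released code): Gaussian noise scaled by
# `noise_eps`, a symmetric KL regularizer weighted by `alpha`, and a toy MLP
# standing in for BERT.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ToyEncoder(nn.Module):
    """Stand-in for a multi-layer encoder (BERT in the paper)."""
    def __init__(self, dim=64, num_layers=4, num_classes=2):
        super().__init__()
        self.layers = nn.ModuleList(
            nn.Sequential(nn.Linear(dim, dim), nn.ReLU())
            for _ in range(num_layers)
        )
        self.head = nn.Linear(dim, num_classes)

    def forward(self, x, noisy_layers=(), noise_eps=1e-3):
        # Optionally perturb the inputs to a chosen subset of layers,
        # producing a label-preserving noisy view of the same example.
        for i, layer in enumerate(self.layers):
            if i in noisy_layers:
                x = x + noise_eps * torch.randn_like(x)
            x = layer(x)
        return self.head(x)

def rapt_loss(model, x, y, num_noisy_layers=2, noise_eps=1e-3, alpha=1.0):
    """Task loss on the clean input plus a symmetric KL term between the
    clean and noisy posteriors."""
    clean_logits = model(x)
    # Sample which layers receive noise for this batch (hypothetical scheme).
    perm = torch.randperm(len(model.layers))[:num_noisy_layers].tolist()
    noisy_logits = model(x, noisy_layers=perm, noise_eps=noise_eps)

    task = F.cross_entropy(clean_logits, y)
    p = F.log_softmax(clean_logits, dim=-1)
    q = F.log_softmax(noisy_logits, dim=-1)
    # Symmetric KL regularizer on the posterior difference.
    reg = 0.5 * (F.kl_div(q, p, log_target=True, reduction="batchmean")
               + F.kl_div(p, q, log_target=True, reduction="batchmean"))
    return task + alpha * reg

if __name__ == "__main__":
    model = ToyEncoder()
    x, y = torch.randn(8, 64), torch.randint(0, 2, (8,))
    loss = rapt_loss(model, x, y)
    loss.backward()
    print(f"RAPT loss: {loss.item():.4f}")
```

The symmetric KL term follows the common consistency-training recipe; the paper's exact divergence, noise distribution, and layer-selection strategy may differ.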
Markdown (Informal)
[Multi-Layer Random Perturbation Training for improving Model Generalization Efficiently](https://aclanthology.org/2021.blackboxnlp-1.23/) (Kanashiro Pereira et al., BlackboxNLP 2021)