@inproceedings{broscheit-etal-2022-distributionally,
title = "Distributionally Robust Finetuning {BERT} for Covariate Drift in Spoken Language Understanding",
author = "Broscheit, Samuel and
Do, Quynh and
Gaspers, Judith",
editor = "Muresan, Smaranda and
Nakov, Preslav and
Villavicencio, Aline",
booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.acl-long.139/",
doi = "10.18653/v1/2022.acl-long.139",
pages = "1970--1985",
abstract = "In this study, we investigate robustness against covariate drift in spoken language understanding (SLU). Covariate drift can occur in SLUwhen there is a drift between training and testing regarding what users request or how they request it. To study this we propose a method that exploits natural variations in data to create a covariate drift in SLU datasets. Experiments show that a state-of-the-art BERT-based model suffers performance loss under this drift. To mitigate the performance loss, we investigate distributionally robust optimization (DRO) for finetuning BERT-based models. We discuss some recent DRO methods, propose two new variants and empirically show that DRO improves robustness under drift."
}