@inproceedings{chen-yu-2021-gold,
title = "{GOLD}: Improving Out-of-Scope Detection in Dialogues using Data Augmentation",
author = "Chen, Derek and
Yu, Zhou",
editor = "Moens, Marie-Francine and
Huang, Xuanjing and
Specia, Lucia and
Yih, Scott Wen-tau",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2021.emnlp-main.35/",
doi = "10.18653/v1/2021.emnlp-main.35",
pages = "429--442",
abstract = "Practical dialogue systems require robust methods of detecting out-of-scope (OOS) utterances to avoid conversational breakdowns and related failure modes. Directly training a model with labeled OOS examples yields reasonable performance, but obtaining such data is a resource-intensive process. To tackle this limited-data problem, previous methods focus on better modeling the distribution of in-scope (INS) examples. We introduce GOLD as an orthogonal technique that augments existing data to train better OOS detectors operating in low-data regimes. GOLD generates pseudo-labeled candidates using samples from an auxiliary dataset and keeps only the most beneficial candidates for training through a novel filtering mechanism. In experiments across three target benchmarks, the top GOLD model outperforms all existing methods on all key metrics, achieving relative gains of 52.4{\%}, 48.9{\%} and 50.3{\%} against median baseline performance. We also analyze the unique properties of OOS data to identify key factors for optimally applying our proposed method."
}