@inproceedings{lang-etal-2024-domain,
  title     = {Out-of-Domain Intent Detection Considering Multi-Turn Dialogue Contexts},
  author    = {Lang, Hao and
               Zheng, Yinhe and
               Hui, Binyuan and
               Huang, Fei and
               Li, Yongbin},
  editor    = {Calzolari, Nicoletta and
               Kan, Min-Yen and
               Hoste, Veronique and
               Lenci, Alessandro and
               Sakti, Sakriani and
               Xue, Nianwen},
  booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)},
  month     = may,
  year      = {2024},
  address   = {Torino, Italia},
  publisher = {ELRA and ICCL},
  url       = {https://aclanthology.org/2024.lrec-main.1097/},
  pages     = {12539--12552},
  abstract  = {Out-of-Domain (OOD) intent detection is vital for practical dialogue systems, and it usually requires considering multi-turn dialogue contexts. However, most previous OOD intent detection approaches are limited to single dialogue turns. In this paper, we introduce a context-aware OOD intent detection (Caro) framework to model multi-turn contexts in OOD intent detection tasks. Specifically, we follow the information bottleneck principle to extract robust representations from multi-turn dialogue contexts. Two different views are constructed for each input sample and the superfluous information not related to intent detection is removed using a multi-view information bottleneck loss. Moreover, we also explore utilizing unlabeled data in Caro. A two-stage training process is introduced to mine OOD samples from these unlabeled data, and these OOD samples are used to train the resulting model with a bootstrapping approach. Comprehensive experiments demonstrate that Caro establishes state-of-the-art performances on multi-turn OOD detection tasks by improving the F1-OOD score of over 29{\%} compared to the previous best method.},
}
Markdown (Informal)
[Out-of-Domain Intent Detection Considering Multi-Turn Dialogue Contexts](https://aclanthology.org/2024.lrec-main.1097/) (Lang et al., LREC-COLING 2024)
ACL