@inproceedings{wang-etal-2018-bi,
  title     = {A Bi-Model Based {RNN} Semantic Frame Parsing Model for Intent Detection and Slot Filling},
  author    = {Wang, Yu and
               Shen, Yilin and
               Jin, Hongxia},
  editor    = {Walker, Marilyn and
               Ji, Heng and
               Stent, Amanda},
  booktitle = {Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers)},
  month     = jun,
  year      = {2018},
  address   = {New Orleans, Louisiana},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/N18-2050/},
  doi       = {10.18653/v1/N18-2050},
  pages     = {309--314},
  abstract  = {Intent detection and slot filling are two main tasks for building a spoken language understanding(SLU) system. Multiple deep learning based models have demonstrated good results on these tasks. The most effective algorithms are based on the structures of sequence to sequence models (or ``encoder-decoder'' models), and generate the intents and semantic tags either using separate models. Most of the previous studies, however, either treat the intent detection and slot filling as two separate parallel tasks, or use a sequence to sequence model to generate both semantic tags and intent. None of the approaches consider the cross-impact between the intent detection task and the slot filling task. In this paper, new Bi-model based RNN semantic frame parsing network structures are designed to perform the intent detection and slot filling tasks jointly, by considering their cross-impact to each other using two correlated bidirectional LSTMs (BLSTM). Our Bi-model structure with a decoder achieves state-of-art result on the benchmark ATIS data, with about 0.5{\%} intent accuracy improvement and 0.9{\%} slot filling improvement.},
}
@comment{Markdown (Informal):
[A Bi-Model Based RNN Semantic Frame Parsing Model for Intent Detection and Slot Filling](https://aclanthology.org/N18-2050/) (Wang et al., NAACL 2018)
ACL}