@inproceedings{wen-etal-2018-sequence,
title = "Sequence-to-Sequence Learning for Task-oriented Dialogue with Dialogue State Representation",
author = "Wen, Haoyang and
Liu, Yijia and
Che, Wanxiang and
Qin, Libo and
Liu, Ting",
editor = "Bender, Emily M. and
Derczynski, Leon and
Isabelle, Pierre",
booktitle = "Proceedings of the 27th International Conference on Computational Linguistics",
month = aug,
year = "2018",
address = "Santa Fe, New Mexico, USA",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/C18-1320/",
pages = "3781--3792",
    abstract = "Classic pipeline models for task-oriented dialogue systems require explicit modeling of dialogue states and hand-crafted action spaces to query a domain-specific knowledge base. Conversely, sequence-to-sequence models learn to map the dialogue history to the response in the current turn without explicit knowledge base querying. In this work, we propose a novel framework that leverages the advantages of classic pipeline and sequence-to-sequence models. Our framework models a dialogue state as a fixed-size distributed representation and uses this representation to query a knowledge base via an attention mechanism. Experiments on the Stanford Multi-turn Multi-domain Task-oriented Dialogue Dataset show that our framework significantly outperforms other sequence-to-sequence baseline models on both automatic and human evaluation."
}
Markdown (Informal)
[Sequence-to-Sequence Learning for Task-oriented Dialogue with Dialogue State Representation](https://aclanthology.org/C18-1320/) (Wen et al., COLING 2018)
ACL
Haoyang Wen, Yijia Liu, Wanxiang Che, Libo Qin, and Ting Liu. 2018. Sequence-to-Sequence Learning for Task-oriented Dialogue with Dialogue State Representation. In Proceedings of the 27th International Conference on Computational Linguistics, pages 3781–3792, Santa Fe, New Mexico, USA. Association for Computational Linguistics.
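
The abstract describes querying a knowledge base via an attention mechanism driven by a fixed-size distributed dialogue-state representation. A minimal sketch of such an attention-based KB query is shown below; it assumes PyTorch, and all function names and tensor shapes are illustrative assumptions, not the authors' actual implementation.

```python
import torch
import torch.nn.functional as F

def query_knowledge_base(state: torch.Tensor,
                         kb_entries: torch.Tensor) -> torch.Tensor:
    """Attend over KB entry embeddings with a dialogue-state vector.

    Hypothetical sketch, not the paper's code:
      state:      (hidden,)             fixed-size dialogue-state representation
      kb_entries: (num_entries, hidden) one embedding per knowledge-base row
      returns:    (hidden,)             attention-weighted KB summary
    """
    scores = kb_entries @ state           # dot-product score per KB entry
    weights = F.softmax(scores, dim=0)    # attention distribution over entries
    return weights @ kb_entries           # weighted sum of entry embeddings

# Toy usage: 4 KB entries with hidden size 8; the resulting summary vector
# would be fed to the decoder alongside the dialogue state when generating
# the response.
state = torch.randn(8)
kb = torch.randn(4, 8)
kb_summary = query_knowledge_base(state, kb)
```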