@inproceedings{zhong-etal-2018-global,
    title     = {Global-Locally Self-Attentive Encoder for Dialogue State Tracking},
    author    = {Zhong, Victor and
                 Xiong, Caiming and
                 Socher, Richard},
    editor    = {Gurevych, Iryna and
                 Miyao, Yusuke},
    booktitle = {Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
    month     = jul,
    year      = {2018},
    address   = {Melbourne, Australia},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/P18-1135/},
    doi       = {10.18653/v1/P18-1135},
    pages     = {1458--1467},
    abstract  = {Dialogue state tracking, which estimates user goals and requests given the dialogue context, is an essential part of task-oriented dialogue systems. In this paper, we propose the Global-Locally Self-Attentive Dialogue State Tracker (GLAD), which learns representations of the user utterance and previous system actions with global-local modules. Our model uses global modules to shares parameters between estimators for different types (called slots) of dialogue states, and uses local modules to learn slot-specific features. We show that this significantly improves tracking of rare states. GLAD obtains 88.3{\%} joint goal accuracy and 96.4{\%} request accuracy on the WoZ state tracking task, outperforming prior work by 3.9{\%} and 4.8{\%}. On the DSTC2 task, our model obtains 74.7{\%} joint goal accuracy and 97.3{\%} request accuracy, outperforming prior work by 1.3{\%} and 0.8{\%}.},
}
Markdown (Informal)
[Global-Locally Self-Attentive Encoder for Dialogue State Tracking](https://aclanthology.org/P18-1135/) (Zhong et al., ACL 2018)
ACL