@inproceedings{rastogi-etal-2018-multi,
    title = "Multi-task Learning for Joint Language Understanding and Dialogue State Tracking",
    author = "Rastogi, Abhinav and
      Gupta, Raghav and
      Hakkani-Tur, Dilek",
    booktitle = "Proceedings of the 19th Annual {SIG}dial Meeting on Discourse and Dialogue",
    month = jul,
    year = "2018",
    address = "Melbourne, Australia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W18-5045",
    doi = "10.18653/v1/W18-5045",
    pages = "376--384",
    abstract = "This paper presents a novel approach for multi-task learning of language understanding (LU) and dialogue state tracking (DST) in task-oriented dialogue systems. Multi-task training enables the sharing of the neural network layers responsible for encoding the user utterance for both LU and DST and improves performance while reducing the number of network parameters. In our proposed framework, DST operates on a set of candidate values for each slot that has been mentioned so far. These candidate sets are generated using LU slot annotations for the current user utterance, dialogue acts corresponding to the preceding system utterance and the dialogue state estimated for the previous turn, enabling DST to handle slots with a large or unbounded set of possible values and deal with slot values not seen during training. Furthermore, to bridge the gap between training and inference, we investigate the use of scheduled sampling on LU output for the current user utterance as well as the DST output for the preceding turn.",
}
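The abstract above describes DST operating, for each slot, on a candidate set assembled from three sources: LU slot annotations for the current user utterance, dialogue acts of the preceding system utterance, and the dialogue state estimated for the previous turn. The following is a minimal illustrative sketch of such a candidate-set update, not the authors' implementation; the function names, the (act, slot, value) representation of system acts, and the fixed capacity are assumptions made purely for illustration.

"""Illustrative sketch of a per-slot candidate-set update as described in the
abstract. NOT the authors' code: names, data shapes, and the capacity bound
are assumptions for this example only."""

from collections import OrderedDict

MAX_CANDIDATES = 7  # assumed bound on candidate-set size; the paper caps the set, the value here is illustrative


def update_candidate_sets(prev_state, system_acts, lu_slot_values, max_candidates=MAX_CANDIDATES):
    """Build new candidate sets per slot from the three sources named in the abstract.

    prev_state:     dict slot -> value estimated at the previous turn
    system_acts:    iterable of (act, slot, value) triples from the preceding system utterance
    lu_slot_values: dict slot -> list of values tagged by LU in the current user utterance
    Returns dict slot -> list of candidate values, most recently mentioned last.
    """
    candidates = {}

    def add(slot, value):
        if value is None:
            return
        bucket = candidates.setdefault(slot, OrderedDict())
        bucket.pop(value, None)          # re-adding a value moves it to the most-recent position
        bucket[value] = True
        while len(bucket) > max_candidates:
            bucket.popitem(last=False)   # evict the oldest candidate to keep the set bounded

    # 1. carry over values from the previous dialogue state
    for slot, value in prev_state.items():
        add(slot, value)
    # 2. values offered or confirmed by the preceding system utterance
    for _act, slot, value in system_acts:
        add(slot, value)
    # 3. values tagged by LU in the current user utterance
    for slot, values in lu_slot_values.items():
        for value in values:
            add(slot, value)

    return {slot: list(bucket) for slot, bucket in candidates.items()}


if __name__ == "__main__":
    prev = {"restaurant-food": "italian"}
    sys_acts = [("OFFER", "restaurant-name", "Trattoria Roma")]
    lu = {"restaurant-food": ["thai"], "restaurant-area": ["north"]}
    print(update_candidate_sets(prev, sys_acts, lu))

Because the candidate set is built dynamically from the dialogue context rather than from a fixed vocabulary, a tracker scoring only these candidates can, as the abstract notes, handle slots with large or unbounded value sets and values never seen during training.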
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="rastogi-etal-2018-multi">
    <titleInfo>
      <title>Multi-task Learning for Joint Language Understanding and Dialogue State Tracking</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Abhinav</namePart>
      <namePart type="family">Rastogi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Raghav</namePart>
      <namePart type="family">Gupta</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Dilek</namePart>
      <namePart type="family">Hakkani-Tur</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-jul</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Melbourne, Australia</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>This paper presents a novel approach for multi-task learning of language understanding (LU) and dialogue state tracking (DST) in task-oriented dialogue systems. Multi-task training enables the sharing of the neural network layers responsible for encoding the user utterance for both LU and DST and improves performance while reducing the number of network parameters. In our proposed framework, DST operates on a set of candidate values for each slot that has been mentioned so far. These candidate sets are generated using LU slot annotations for the current user utterance, dialogue acts corresponding to the preceding system utterance and the dialogue state estimated for the previous turn, enabling DST to handle slots with a large or unbounded set of possible values and deal with slot values not seen during training. Furthermore, to bridge the gap between training and inference, we investigate the use of scheduled sampling on LU output for the current user utterance as well as the DST output for the preceding turn.</abstract>
    <identifier type="citekey">rastogi-etal-2018-multi</identifier>
    <identifier type="doi">10.18653/v1/W18-5045</identifier>
    <location>
      <url>https://aclanthology.org/W18-5045</url>
    </location>
    <part>
      <date>2018-jul</date>
      <extent unit="page">
        <start>376</start>
        <end>384</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Multi-task Learning for Joint Language Understanding and Dialogue State Tracking
%A Rastogi, Abhinav
%A Gupta, Raghav
%A Hakkani-Tur, Dilek
%S Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue
%D 2018
%8 jul
%I Association for Computational Linguistics
%C Melbourne, Australia
%F rastogi-etal-2018-multi
%X This paper presents a novel approach for multi-task learning of language understanding (LU) and dialogue state tracking (DST) in task-oriented dialogue systems. Multi-task training enables the sharing of the neural network layers responsible for encoding the user utterance for both LU and DST and improves performance while reducing the number of network parameters. In our proposed framework, DST operates on a set of candidate values for each slot that has been mentioned so far. These candidate sets are generated using LU slot annotations for the current user utterance, dialogue acts corresponding to the preceding system utterance and the dialogue state estimated for the previous turn, enabling DST to handle slots with a large or unbounded set of possible values and deal with slot values not seen during training. Furthermore, to bridge the gap between training and inference, we investigate the use of scheduled sampling on LU output for the current user utterance as well as the DST output for the preceding turn.
%R 10.18653/v1/W18-5045
%U https://aclanthology.org/W18-5045
%U https://doi.org/10.18653/v1/W18-5045
%P 376-384
Markdown (Informal)
[Multi-task Learning for Joint Language Understanding and Dialogue State Tracking](https://aclanthology.org/W18-5045) (Rastogi et al., 2018)
ACL
Abhinav Rastogi, Raghav Gupta, and Dilek Hakkani-Tur. 2018. Multi-task Learning for Joint Language Understanding and Dialogue State Tracking. In Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue, pages 376–384, Melbourne, Australia. Association for Computational Linguistics.