@inproceedings{homma-etal-2018-maximizing,
  title     = {Maximizing {SLU} Performance with Minimal Training Data Using Hybrid {RNN} Plus Rule-based Approach},
  author    = {Homma, Takeshi and
               Arantes, Adriano S. and
               Gonzalez Diaz, Maria Teresa and
               Togami, Masahito},
  booktitle = {Proceedings of the 19th Annual {SIG}dial Meeting on Discourse and Dialogue},
  month     = jul,
  year      = {2018},
  address   = {Melbourne, Australia},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/W18-5043},
  doi       = {10.18653/v1/W18-5043},
  pages     = {366--370},
  abstract  = {Spoken language understanding (SLU) by using recurrent neural networks (RNN) achieves good performances for large training data sets, but collecting large training datasets is a challenge, especially for new voice applications. Therefore, the purpose of this study is to maximize SLU performances, especially for small training data sets. To this aim, we propose a novel CRF-based dialog act selector which chooses suitable dialog acts from outputs of RNN SLU and rule-based SLU. We evaluate the selector by using DSTC2 corpus when RNN SLU is trained by less than 1,000 training sentences. The evaluation demonstrates the selector achieves Micro F1 better than both RNN and rule-based SLUs. In addition, it shows the selector achieves better Macro F1 than RNN SLU and the same Macro F1 as rule-based SLU. Thus, we confirmed our method offers advantages in SLU performances for small training data sets.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="homma-etal-2018-maximizing">
<titleInfo>
<title>Maximizing SLU Performance with Minimal Training Data Using Hybrid RNN Plus Rule-based Approach</title>
</titleInfo>
<name type="personal">
<namePart type="given">Takeshi</namePart>
<namePart type="family">Homma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Adriano</namePart>
<namePart type="given">S.</namePart>
<namePart type="family">Arantes</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maria</namePart>
<namePart type="given">Teresa</namePart>
<namePart type="family">Gonzalez Diaz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Masahito</namePart>
<namePart type="family">Togami</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Melbourne, Australia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Spoken language understanding (SLU) by using recurrent neural networks (RNN) achieves good performances for large training data sets, but collecting large training datasets is a challenge, especially for new voice applications. Therefore, the purpose of this study is to maximize SLU performances, especially for small training data sets. To this aim, we propose a novel CRF-based dialog act selector which chooses suitable dialog acts from outputs of RNN SLU and rule-based SLU. We evaluate the selector by using DSTC2 corpus when RNN SLU is trained by less than 1,000 training sentences. The evaluation demonstrates the selector achieves Micro F1 better than both RNN and rule-based SLUs. In addition, it shows the selector achieves better Macro F1 than RNN SLU and the same Macro F1 as rule-based SLU. Thus, we confirmed our method offers advantages in SLU performances for small training data sets.</abstract>
<identifier type="citekey">homma-etal-2018-maximizing</identifier>
<identifier type="doi">10.18653/v1/W18-5043</identifier>
<location>
<url>https://aclanthology.org/W18-5043</url>
</location>
<part>
<date>2018-07</date>
<extent unit="page">
<start>366</start>
<end>370</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Maximizing SLU Performance with Minimal Training Data Using Hybrid RNN Plus Rule-based Approach
%A Homma, Takeshi
%A Arantes, Adriano S.
%A Gonzalez Diaz, Maria Teresa
%A Togami, Masahito
%S Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne, Australia
%F homma-etal-2018-maximizing
%X Spoken language understanding (SLU) by using recurrent neural networks (RNN) achieves good performances for large training data sets, but collecting large training datasets is a challenge, especially for new voice applications. Therefore, the purpose of this study is to maximize SLU performances, especially for small training data sets. To this aim, we propose a novel CRF-based dialog act selector which chooses suitable dialog acts from outputs of RNN SLU and rule-based SLU. We evaluate the selector by using DSTC2 corpus when RNN SLU is trained by less than 1,000 training sentences. The evaluation demonstrates the selector achieves Micro F1 better than both RNN and rule-based SLUs. In addition, it shows the selector achieves better Macro F1 than RNN SLU and the same Macro F1 as rule-based SLU. Thus, we confirmed our method offers advantages in SLU performances for small training data sets.
%R 10.18653/v1/W18-5043
%U https://aclanthology.org/W18-5043
%U https://doi.org/10.18653/v1/W18-5043
%P 366-370
Markdown (Informal)
[Maximizing SLU Performance with Minimal Training Data Using Hybrid RNN Plus Rule-based Approach](https://aclanthology.org/W18-5043) (Homma et al., 2018)
ACL