@inproceedings{chiba-etal-2018-analysis,
title = "An Analysis of the Effect of Emotional Speech Synthesis on Non-Task-Oriented Dialogue System",
author = "Chiba, Yuya and
Nose, Takashi and
Kase, Taketo and
Yamanaka, Mai and
Ito, Akinori",
booktitle = "Proceedings of the 19th Annual {SIG}dial Meeting on Discourse and Dialogue",
month = jul,
year = "2018",
address = "Melbourne, Australia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W18-5044",
doi = "10.18653/v1/W18-5044",
pages = "371--375",
abstract = "This paper explores the effect of emotional speech synthesis on a spoken dialogue system when the dialogue is non-task-oriented. Although the use of emotional speech responses have been shown to be effective in a limited domain, e.g., scenario-based and counseling dialogue, the effect is still not clear in the non-task-oriented dialogue such as voice chatting. For this purpose, we constructed a simple dialogue system with example- and rule-based dialogue management. In the system, two types of emotion labeling with emotion estimation are adopted, i.e., system-driven and user-cooperative emotion labeling. We conducted a dialogue experiment where subjects evaluate the subjective quality of the system and the dialogue from the multiple aspects such as richness of the dialogue and impression of the agent. We then analyze and discuss the results and show the advantage of using appropriate emotions for the expressive speech responses in the non-task-oriented system.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chiba-etal-2018-analysis">
<titleInfo>
<title>An Analysis of the Effect of Emotional Speech Synthesis on Non-Task-Oriented Dialogue System</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yuya</namePart>
<namePart type="family">Chiba</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Takashi</namePart>
<namePart type="family">Nose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Taketo</namePart>
<namePart type="family">Kase</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mai</namePart>
<namePart type="family">Yamanaka</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Akinori</namePart>
<namePart type="family">Ito</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Melbourne, Australia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper explores the effect of emotional speech synthesis on a spoken dialogue system when the dialogue is non-task-oriented. Although the use of emotional speech responses has been shown to be effective in limited domains, e.g., scenario-based and counseling dialogue, the effect is still not clear in non-task-oriented dialogue such as voice chatting. For this purpose, we constructed a simple dialogue system with example- and rule-based dialogue management. In the system, two types of emotion labeling with emotion estimation are adopted, i.e., system-driven and user-cooperative emotion labeling. We conducted a dialogue experiment in which subjects evaluated the subjective quality of the system and the dialogue from multiple aspects, such as richness of the dialogue and impression of the agent. We then analyze and discuss the results and show the advantage of using appropriate emotions for expressive speech responses in a non-task-oriented system.</abstract>
<identifier type="citekey">chiba-etal-2018-analysis</identifier>
<identifier type="doi">10.18653/v1/W18-5044</identifier>
<location>
<url>https://aclanthology.org/W18-5044</url>
</location>
<part>
<date>2018-07</date>
<extent unit="page">
<start>371</start>
<end>375</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T An Analysis of the Effect of Emotional Speech Synthesis on Non-Task-Oriented Dialogue System
%A Chiba, Yuya
%A Nose, Takashi
%A Kase, Taketo
%A Yamanaka, Mai
%A Ito, Akinori
%S Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne, Australia
%F chiba-etal-2018-analysis
%X This paper explores the effect of emotional speech synthesis on a spoken dialogue system when the dialogue is non-task-oriented. Although the use of emotional speech responses has been shown to be effective in limited domains, e.g., scenario-based and counseling dialogue, the effect is still not clear in non-task-oriented dialogue such as voice chatting. For this purpose, we constructed a simple dialogue system with example- and rule-based dialogue management. In the system, two types of emotion labeling with emotion estimation are adopted, i.e., system-driven and user-cooperative emotion labeling. We conducted a dialogue experiment in which subjects evaluated the subjective quality of the system and the dialogue from multiple aspects, such as richness of the dialogue and impression of the agent. We then analyze and discuss the results and show the advantage of using appropriate emotions for expressive speech responses in a non-task-oriented system.
%R 10.18653/v1/W18-5044
%U https://aclanthology.org/W18-5044
%U https://doi.org/10.18653/v1/W18-5044
%P 371-375
Markdown (Informal)
[An Analysis of the Effect of Emotional Speech Synthesis on Non-Task-Oriented Dialogue System](https://aclanthology.org/W18-5044) (Chiba et al., 2018)
ACL
Yuya Chiba, Takashi Nose, Taketo Kase, Mai Yamanaka, and Akinori Ito. 2018. An Analysis of the Effect of Emotional Speech Synthesis on Non-Task-Oriented Dialogue System. In Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue, pages 371–375, Melbourne, Australia. Association for Computational Linguistics.