@inproceedings{sankar-ravi-2019-deep,
title = "Deep Reinforcement Learning For Modeling Chit-Chat Dialog With Discrete Attributes",
author = "Sankar, Chinnadhurai and
Ravi, Sujith",
booktitle = "Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue",
month = sep,
year = "2019",
address = "Stockholm, Sweden",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W19-5901",
doi = "10.18653/v1/W19-5901",
pages = "1--10",
abstract = "Open domain dialog systems face the challenge of being repetitive and producing generic responses. In this paper, we demonstrate that by conditioning the response generation on interpretable discrete dialog attributes and composed attributes, it helps improve the model perplexity and results in diverse and interesting non-redundant responses. We propose to formulate the dialog attribute prediction as a reinforcement learning (RL) problem and use policy gradients methods to optimize utterance generation using long-term rewards. Unlike existing RL approaches which formulate the token prediction as a policy, our method reduces the complexity of the policy optimization by limiting the action space to dialog attributes, thereby making the policy optimization more practical and sample efficient. We demonstrate this with experimental and human evaluations.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="sankar-ravi-2019-deep">
    <titleInfo>
      <title>Deep Reinforcement Learning For Modeling Chit-Chat Dialog With Discrete Attributes</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Chinnadhurai</namePart>
      <namePart type="family">Sankar</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sujith</namePart>
      <namePart type="family">Ravi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-09</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Stockholm, Sweden</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Open-domain dialog systems face the challenge of being repetitive and producing generic responses. In this paper, we demonstrate that conditioning response generation on interpretable discrete dialog attributes and composed attributes improves model perplexity and yields diverse, interesting, and non-redundant responses. We propose to formulate dialog attribute prediction as a reinforcement learning (RL) problem and use policy gradient methods to optimize utterance generation with long-term rewards. Unlike existing RL approaches, which formulate token prediction as a policy, our method reduces the complexity of policy optimization by limiting the action space to dialog attributes, making policy optimization more practical and sample-efficient. We demonstrate this with experimental and human evaluations.</abstract>
    <identifier type="citekey">sankar-ravi-2019-deep</identifier>
    <identifier type="doi">10.18653/v1/W19-5901</identifier>
    <location>
      <url>https://aclanthology.org/W19-5901</url>
    </location>
    <part>
      <date>2019-09</date>
      <extent unit="page">
        <start>1</start>
        <end>10</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Deep Reinforcement Learning For Modeling Chit-Chat Dialog With Discrete Attributes
%A Sankar, Chinnadhurai
%A Ravi, Sujith
%S Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue
%D 2019
%8 sep
%I Association for Computational Linguistics
%C Stockholm, Sweden
%F sankar-ravi-2019-deep
%X Open-domain dialog systems face the challenge of being repetitive and producing generic responses. In this paper, we demonstrate that conditioning response generation on interpretable discrete dialog attributes and composed attributes improves model perplexity and yields diverse, interesting, and non-redundant responses. We propose to formulate dialog attribute prediction as a reinforcement learning (RL) problem and use policy gradient methods to optimize utterance generation with long-term rewards. Unlike existing RL approaches, which formulate token prediction as a policy, our method reduces the complexity of policy optimization by limiting the action space to dialog attributes, making policy optimization more practical and sample-efficient. We demonstrate this with experimental and human evaluations.
%R 10.18653/v1/W19-5901
%U https://aclanthology.org/W19-5901
%U https://doi.org/10.18653/v1/W19-5901
%P 1-10
Markdown (Informal)
[Deep Reinforcement Learning For Modeling Chit-Chat Dialog With Discrete Attributes](https://aclanthology.org/W19-5901) (Sankar & Ravi, 2019)
ACL
Chinnadhurai Sankar and Sujith Ravi. 2019. [Deep Reinforcement Learning For Modeling Chit-Chat Dialog With Discrete Attributes](https://aclanthology.org/W19-5901). In *Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue*, pages 1–10, Stockholm, Sweden. Association for Computational Linguistics.
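
The abstract's key design choice is running policy-gradient RL over a small discrete attribute action space instead of the token-level action space. The toy REINFORCE-style sketch below illustrates that idea only; it is not the authors' implementation, and the attribute set, context encoding, reward function, and all names in it are assumptions.

# Toy REINFORCE over discrete dialog attributes (hypothetical names throughout).
import torch
import torch.nn as nn

ATTRIBUTES = ["question", "statement", "positive", "negative"]  # assumed toy attribute set

class AttributePolicy(nn.Module):
    """Maps a dialog-context vector to a distribution over discrete attributes."""
    def __init__(self, context_dim: int, num_attributes: int):
        super().__init__()
        self.scorer = nn.Linear(context_dim, num_attributes)

    def forward(self, context: torch.Tensor) -> torch.distributions.Categorical:
        return torch.distributions.Categorical(logits=self.scorer(context))

def reinforce_step(policy, optimizer, context, reward_fn):
    """Sample an attribute (the action), score the response it would condition,
    and take one policy-gradient step on the log-probability-weighted reward."""
    dist = policy(context)
    action = dist.sample()                      # a single discrete attribute index
    reward = reward_fn(action)                  # stand-in for a long-term dialog reward
    loss = -(dist.log_prob(action) * reward).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return ATTRIBUTES[action.item()], float(reward)

if __name__ == "__main__":
    torch.manual_seed(0)
    policy = AttributePolicy(context_dim=16, num_attributes=len(ATTRIBUTES))
    optimizer = torch.optim.Adam(policy.parameters(), lr=1e-2)
    # Hypothetical reward: pretend attribute 0 ("question") yields less generic replies.
    reward_fn = lambda a: torch.tensor(1.0 if a.item() == 0 else 0.1)
    for _ in range(200):
        attr, r = reinforce_step(policy, optimizer, torch.randn(1, 16), reward_fn)

Because the policy chooses among only len(ATTRIBUTES) actions rather than the full vocabulary, each sampled gradient is far less noisy, which is the practicality and sample-efficiency argument the abstract makes.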