@inproceedings{papangelis-etal-2019-collaborative,
    title = "Collaborative Multi-Agent Dialogue Model Training Via Reinforcement Learning",
    author = "Papangelis, Alexandros and
      Wang, Yi-Chia and
      Molino, Piero and
      Tur, Gokhan",
    booktitle = "Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue",
    month = sep,
    year = "2019",
    address = "Stockholm, Sweden",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W19-5912",
    doi = "10.18653/v1/W19-5912",
    pages = "92--102",
    abstract = "Some of the major challenges in training conversational agents include the lack of large-scale data of real-world complexity, defining appropriate evaluation measures, and managing meaningful conversations across many topics over long periods of time. Moreover, most works tend to assume that the conversational agent{'}s environment is stationary, a somewhat strong assumption. To remove this assumption and overcome the lack of data, we take a step away from the traditional training pipeline and model the conversation as a stochastic collaborative game. Each agent (player) has a role ({``}assistant{''}, {``}tourist{''}, {``}eater{''}, etc.) and their own objectives, and can only interact via language they generate. Each agent, therefore, needs to learn to operate optimally in an environment with multiple sources of uncertainty (its own LU and LG, the other agent{'}s LU, Policy, and LG). In this work, we present the first complete attempt at concurrently training conversational agents that communicate only via self-generated language and show that they outperform supervised and deep learning baselines.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="papangelis-etal-2019-collaborative">
    <titleInfo>
      <title>Collaborative Multi-Agent Dialogue Model Training Via Reinforcement Learning</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Alexandros</namePart>
      <namePart type="family">Papangelis</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yi-Chia</namePart>
      <namePart type="family">Wang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Piero</namePart>
      <namePart type="family">Molino</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Gokhan</namePart>
      <namePart type="family">Tur</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-09</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Stockholm, Sweden</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Some of the major challenges in training conversational agents include the lack of large-scale data of real-world complexity, defining appropriate evaluation measures, and managing meaningful conversations across many topics over long periods of time. Moreover, most works tend to assume that the conversational agent’s environment is stationary, a somewhat strong assumption. To remove this assumption and overcome the lack of data, we take a step away from the traditional training pipeline and model the conversation as a stochastic collaborative game. Each agent (player) has a role (“assistant”, “tourist”, “eater”, etc.) and their own objectives, and can only interact via language they generate. Each agent, therefore, needs to learn to operate optimally in an environment with multiple sources of uncertainty (its own LU and LG, the other agent’s LU, Policy, and LG). In this work, we present the first complete attempt at concurrently training conversational agents that communicate only via self-generated language and show that they outperform supervised and deep learning baselines.</abstract>
    <identifier type="citekey">papangelis-etal-2019-collaborative</identifier>
    <identifier type="doi">10.18653/v1/W19-5912</identifier>
    <location>
      <url>https://aclanthology.org/W19-5912</url>
    </location>
    <part>
      <date>2019-09</date>
      <extent unit="page">
        <start>92</start>
        <end>102</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Collaborative Multi-Agent Dialogue Model Training Via Reinforcement Learning
%A Papangelis, Alexandros
%A Wang, Yi-Chia
%A Molino, Piero
%A Tur, Gokhan
%S Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue
%D 2019
%8 sep
%I Association for Computational Linguistics
%C Stockholm, Sweden
%F papangelis-etal-2019-collaborative
%X Some of the major challenges in training conversational agents include the lack of large-scale data of real-world complexity, defining appropriate evaluation measures, and managing meaningful conversations across many topics over long periods of time. Moreover, most works tend to assume that the conversational agent’s environment is stationary, a somewhat strong assumption. To remove this assumption and overcome the lack of data, we take a step away from the traditional training pipeline and model the conversation as a stochastic collaborative game. Each agent (player) has a role (“assistant”, “tourist”, “eater”, etc.) and their own objectives, and can only interact via language they generate. Each agent, therefore, needs to learn to operate optimally in an environment with multiple sources of uncertainty (its own LU and LG, the other agent’s LU, Policy, and LG). In this work, we present the first complete attempt at concurrently training conversational agents that communicate only via self-generated language and show that they outperform supervised and deep learning baselines.
%R 10.18653/v1/W19-5912
%U https://aclanthology.org/W19-5912
%U https://doi.org/10.18653/v1/W19-5912
%P 92-102
Markdown (Informal)
[Collaborative Multi-Agent Dialogue Model Training Via Reinforcement Learning](https://aclanthology.org/W19-5912) (Papangelis et al., 2019)
ACL
Alexandros Papangelis, Yi-Chia Wang, Piero Molino, and Gokhan Tur. 2019. Collaborative Multi-Agent Dialogue Model Training Via Reinforcement Learning. In Proceedings of the 20th Annual SIGdial Meeting on Discourse and Dialogue, pages 92–102, Stockholm, Sweden. Association for Computational Linguistics.