@inproceedings{yu-etal-2023-krls,
title = "{KRLS}: Improving End-to-End Response Generation in Task Oriented Dialog with Reinforced Keywords Learning",
author = "Yu, Xiao and
Wu, Qingyang and
Qian, Kun and
Yu, Zhou",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2023.emnlp-main.759/",
doi = "10.18653/v1/2023.emnlp-main.759",
pages = "12338--12358",
abstract = "In task-oriented dialogs (TOD), reinforcement learning (RL) algorithms train a model to directly optimize response for task-related metrics. However, RL often needs to perform exploration, which can be time-consuming due to the slow auto-regressive sequence generation process. We investigate an approach to create a more efficient RL-based algorithm to improve TOD performance in an offline setting. First, we use a faster generation procedure that samples from independent next-word distributions after training the language model (LM) with supervised learning. We then introduce a fine-grained reward function to help the model focus on learning key information in a dialog, by measuring the importance and semantic closeness of each generated token. Experiments on the MultiWoZ dataset show our new training algorithm, Keywords Reinforcement Learning with Next-word Sampling (KRLS), achieves state-of-the-art performance on the end-to-end response generation task, with a 15{\%} training time reduction compared to a standard RL algorithm using auto-regressive generation."
}
Markdown (Informal)
[KRLS: Improving End-to-End Response Generation in Task Oriented Dialog with Reinforced Keywords Learning](https://aclanthology.org/2023.emnlp-main.759/) (Yu et al., EMNLP 2023)
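The abstract describes two mechanisms: sampling every next-word distribution in a single teacher-forced pass (instead of a slow autoregressive rollout) and a per-token reward combining keyword importance with semantic closeness. Below is a minimal sketch of that idea, assuming a toy GRU language model, a REINFORCE-style update, embedding cosine similarity as the closeness signal, and a hand-picked keyword set; all of these are illustrative stand-ins, not the authors' implementation.

```python
# Hypothetical sketch of KRLS-style next-word sampling with a fine-grained reward.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
vocab, hidden = 100, 32

# Toy causal LM (embedding -> GRU -> vocab logits), standing in for a
# supervised-trained LM as described in the abstract.
emb = torch.nn.Embedding(vocab, hidden)
rnn = torch.nn.GRU(hidden, hidden, batch_first=True)
head = torch.nn.Linear(vocab if False else hidden, vocab)
opt = torch.optim.Adam(
    [*emb.parameters(), *rnn.parameters(), *head.parameters()], lr=1e-3
)

gold = torch.randint(0, vocab, (1, 12))        # reference response tokens
targets = gold[:, 1:]                          # next-word targets
keywords = {int(gold[0, 3]), int(gold[0, 7])}  # assumed "key information" tokens

# One teacher-forced pass yields every next-word distribution at once, so
# exploration samples tokens independently per position (no autoregressive loop).
h, _ = rnn(emb(gold[:, :-1]))
logits = head(h)                               # (1, T-1, vocab)
dist = torch.distributions.Categorical(logits=logits)
sampled = dist.sample()                        # (1, T-1) independent samples

# Fine-grained per-token reward: keywords weighted higher (importance), with
# partial credit for semantic closeness (here: embedding cosine similarity).
with torch.no_grad():
    sim = F.cosine_similarity(emb(sampled), emb(targets), dim=-1)  # (1, T-1)
    weight = torch.tensor([[2.0 if int(t) in keywords else 1.0
                            for t in targets[0]]])
    reward = weight * sim

# REINFORCE-style update: raise log-probs of sampled tokens, scaled by reward.
loss = -(dist.log_prob(sampled) * reward).mean()
opt.zero_grad()
loss.backward()
opt.step()
```

Because every position is sampled from one forward pass, the exploration cost is a single LM call per response rather than one call per generated token, which is where the reported training-time reduction would come from.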