@inproceedings{wang-etal-2018-deep,
    title     = {Deep Reinforcement Learning for {NLP}},
    author    = {Wang, William Yang and
                 Li, Jiwei and
                 He, Xiaodong},
    editor    = {Artzi, Yoav and
                 Eisenstein, Jacob},
    booktitle = {Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts},
    month     = jul,
    year      = {2018},
    address   = {Melbourne, Australia},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/P18-5007/},
    doi       = {10.18653/v1/P18-5007},
    pages     = {19--21},
    abstract  = {Many Natural Language Processing (NLP) tasks (including generation, language grounding, reasoning, information extraction, coreference resolution, and dialog) can be formulated as deep reinforcement learning (DRL) problems. However, since language is often discrete and the space for all sentences is infinite, there are many challenges for formulating reinforcement learning problems of NLP tasks. In this tutorial, we provide a gentle introduction to the foundation of deep reinforcement learning, as well as some practical DRL solutions in NLP. We describe recent advances in designing deep reinforcement learning for NLP, with a special focus on generation, dialogue, and information extraction. Finally, we discuss why they succeed, and when they may fail, aiming at providing some practical advice about deep reinforcement learning for solving real-world NLP problems.}
}
Markdown (Informal)
[Deep Reinforcement Learning for NLP](https://aclanthology.org/P18-5007/) (Wang et al., ACL 2018)
ACL
- William Yang Wang, Jiwei Li, and Xiaodong He. 2018. Deep Reinforcement Learning for NLP. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts, pages 19–21, Melbourne, Australia. Association for Computational Linguistics.