@inproceedings{li-etal-2017-deep,
title = "Deep Recurrent Generative Decoder for Abstractive Text Summarization",
author = "Li, Piji and
Lam, Wai and
Bing, Lidong and
Wang, Zihao",
editor = "Palmer, Martha and
Hwa, Rebecca and
Riedel, Sebastian",
booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/D17-1222/",
doi = "10.18653/v1/D17-1222",
pages = "2091--2100",
abstract = "We propose a new framework for abstractive text summarization based on a sequence-to-sequence oriented encoder-decoder model equipped with a deep recurrent generative decoder (DRGN). Latent structure information implied in the target summaries is learned based on a recurrent latent random model for improving the summarization quality. Neural variational inference is employed to address the intractable posterior inference for the recurrent latent variables. Abstractive summaries are generated based on both the generative latent variables and the discriminative deterministic states. Extensive experiments on some benchmark datasets in different languages show that DRGN achieves improvements over the state-of-the-art methods."
}
Markdown (Informal)
[Deep Recurrent Generative Decoder for Abstractive Text Summarization](https://preview.aclanthology.org/jlcl-multiple-ingestion/D17-1222/) (Li et al., EMNLP 2017)
ACL
Piji Li, Wai Lam, Lidong Bing, and Zihao Wang. 2017. Deep Recurrent Generative Decoder for Abstractive Text Summarization. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2091–2100, Copenhagen, Denmark. Association for Computational Linguistics.
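
The abstract describes a sequence-to-sequence model whose decoder carries a recurrent latent random variable trained with neural variational inference, and generates each word from both the sampled latent variable and the deterministic decoder state. The sketch below is a minimal, hypothetical illustration of that idea in PyTorch, not the authors' implementation: the class name, layer choices (a GRU cell, a standard-normal prior), all dimensions, and the omission of the encoder and attention are assumptions made only for illustration.

```python
import torch
import torch.nn as nn


class RecurrentGenerativeDecoderStep(nn.Module):
    """One decoding step combining a deterministic recurrent state with a
    VAE-style recurrent latent variable (illustrative sketch, not the paper's code)."""

    def __init__(self, embed_dim, hidden_dim, latent_dim, vocab_size):
        super().__init__()
        self.gru_cell = nn.GRUCell(embed_dim + latent_dim, hidden_dim)
        # Inference network q(z_t | h_t, z_{t-1}): mean and log-variance of the posterior.
        self.q_mu = nn.Linear(hidden_dim + latent_dim, latent_dim)
        self.q_logvar = nn.Linear(hidden_dim + latent_dim, latent_dim)
        # Output layer conditioned on both the deterministic state and the latent sample.
        self.out = nn.Linear(hidden_dim + latent_dim, vocab_size)

    def forward(self, y_prev_embed, h_prev, z_prev):
        # Deterministic recurrent update from the previous word embedding
        # and the previous latent sample.
        h_t = self.gru_cell(torch.cat([y_prev_embed, z_prev], dim=-1), h_prev)

        # Variational posterior over the recurrent latent variable.
        q_input = torch.cat([h_t, z_prev], dim=-1)
        mu, logvar = self.q_mu(q_input), self.q_logvar(q_input)

        # Reparameterization trick: z_t = mu + sigma * eps.
        z_t = mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)

        # KL term against a standard-normal prior (one piece of the variational lower bound;
        # the assumed prior is a simplification for this sketch).
        kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=-1)

        # Vocabulary logits from both the generative latent variable and the
        # discriminative deterministic state.
        logits = self.out(torch.cat([h_t, z_t], dim=-1))
        return logits, h_t, z_t, kl


# Example: one decoding step for a batch of 4 (all sizes are illustrative).
step = RecurrentGenerativeDecoderStep(embed_dim=128, hidden_dim=256,
                                      latent_dim=32, vocab_size=30000)
y_embed = torch.zeros(4, 128)           # embedding of the previously generated word
h = torch.zeros(4, 256)                 # initial deterministic state (e.g. from an encoder)
z = torch.zeros(4, 32)                  # initial latent sample
logits, h, z, kl = step(y_embed, h, z)  # logits: (4, 30000); kl: (4,)
```

In training, the per-step KL terms would be added to the reconstruction (cross-entropy) loss to form the variational lower bound; at decoding time the next word is read off the logits.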