@inproceedings{wang-etal-2022-guiding,
title = "Guiding Abstractive Dialogue Summarization with Content Planning",
author = "Wang, Ye and
Wan, Xiaojun and
Cai, Zhiping",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-emnlp.248/",
doi = "10.18653/v1/2022.findings-emnlp.248",
pages = "3408--3413",
abstract = "Abstractive dialogue summarization has recently been receiving more attention. We propose a coarse-to-fine model for generating abstractive dialogue summaries, and introduce a fact-aware reinforcement learning (RL) objective that improves the fact consistency between the dialogue and the generated summary. Initially, the model generates the predicate-argument spans of the dialogue, and then generates the final summary through a fact-aware RL objective. Extensive experiments and analysis on two benchmark datasets demonstrate that our proposed method effectively improves the quality of the generated summary, especially in coherence and consistency."
}