@inproceedings{pasunuru-etal-2017-towards,
    title     = {Towards Improving Abstractive Summarization via Entailment Generation},
    author    = {Pasunuru, Ramakanth and
                 Guo, Han and
                 Bansal, Mohit},
    editor    = {Wang, Lu and
                 Cheung, Jackie Chi Kit and
                 Carenini, Giuseppe and
                 Liu, Fei},
    booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},
    month     = sep,
    year      = {2017},
    address   = {Copenhagen, Denmark},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/W17-4504/},
    doi       = {10.18653/v1/W17-4504},
    pages     = {27--32},
    abstract  = {Abstractive summarization, the task of rewriting and compressing a document into a short summary, has achieved considerable success with neural sequence-to-sequence models. However, these models can still benefit from stronger natural language inference skills, since a correct summary is logically entailed by the input document, i.e., it should not contain any contradictory or unrelated information. We incorporate such knowledge into an abstractive summarization model via multi-task learning, where we share its decoder parameters with those of an entailment generation model. We achieve promising initial improvements based on multiple metrics and datasets (including a test-only setting). The domain mismatch between the entailment (captions) and summarization (news) datasets suggests that the model is learning some domain-agnostic inference skills.},
}
Markdown (Informal)
[Towards Improving Abstractive Summarization via Entailment Generation](https://aclanthology.org/W17-4504/) (Pasunuru et al., 2017)
ACL