@inproceedings{laban-etal-2020-summary,
    title = "The Summary Loop: Learning to Write Abstractive Summaries Without Examples",
    author = "Laban, Philippe and
      Hsi, Andrew and
      Canny, John and
      Hearst, Marti A.",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.acl-main.460",
    doi = "10.18653/v1/2020.acl-main.460",
    pages = "5135--5150",
    abstract = "This work presents a new approach to unsupervised abstractive summarization based on maximizing a combination of coverage and fluency for a given length constraint. It introduces a novel method that encourages the inclusion of key terms from the original document into the summary: key terms are masked out of the original document and must be filled in by a coverage model using the current generated summary. A novel unsupervised training procedure leverages this coverage model along with a fluency model to generate and score summaries. When tested on popular news summarization datasets, the method outperforms previous unsupervised methods by more than 2 R-1 points, and approaches results of competitive supervised methods. Our model attains higher levels of abstraction with copied passages roughly two times shorter than prior work, and learns to compress and merge sentences without supervision.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="laban-etal-2020-summary">
<titleInfo>
<title>The Summary Loop: Learning to Write Abstractive Summaries Without Examples</title>
</titleInfo>
<name type="personal">
<namePart type="given">Philippe</namePart>
<namePart type="family">Laban</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andrew</namePart>
<namePart type="family">Hsi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">John</namePart>
<namePart type="family">Canny</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marti</namePart>
<namePart type="given">A</namePart>
<namePart type="family">Hearst</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-jul</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This work presents a new approach to unsupervised abstractive summarization based on maximizing a combination of coverage and fluency for a given length constraint. It introduces a novel method that encourages the inclusion of key terms from the original document into the summary: key terms are masked out of the original document and must be filled in by a coverage model using the current generated summary. A novel unsupervised training procedure leverages this coverage model along with a fluency model to generate and score summaries. When tested on popular news summarization datasets, the method outperforms previous unsupervised methods by more than 2 R-1 points, and approaches results of competitive supervised methods. Our model attains higher levels of abstraction with copied passages roughly two times shorter than prior work, and learns to compress and merge sentences without supervision.</abstract>
<identifier type="citekey">laban-etal-2020-summary</identifier>
<identifier type="doi">10.18653/v1/2020.acl-main.460</identifier>
<location>
<url>https://aclanthology.org/2020.acl-main.460</url>
</location>
<part>
<date>2020-jul</date>
<extent unit="page">
<start>5135</start>
<end>5150</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T The Summary Loop: Learning to Write Abstractive Summaries Without Examples
%A Laban, Philippe
%A Hsi, Andrew
%A Canny, John
%A Hearst, Marti A.
%S Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics
%D 2020
%8 July
%I Association for Computational Linguistics
%C Online
%F laban-etal-2020-summary
%X This work presents a new approach to unsupervised abstractive summarization based on maximizing a combination of coverage and fluency for a given length constraint. It introduces a novel method that encourages the inclusion of key terms from the original document into the summary: key terms are masked out of the original document and must be filled in by a coverage model using the current generated summary. A novel unsupervised training procedure leverages this coverage model along with a fluency model to generate and score summaries. When tested on popular news summarization datasets, the method outperforms previous unsupervised methods by more than 2 R-1 points, and approaches results of competitive supervised methods. Our model attains higher levels of abstraction with copied passages roughly two times shorter than prior work, and learns to compress and merge sentences without supervision.
%R 10.18653/v1/2020.acl-main.460
%U https://aclanthology.org/2020.acl-main.460
%U https://doi.org/10.18653/v1/2020.acl-main.460
%P 5135-5150
Markdown (Informal)
[The Summary Loop: Learning to Write Abstractive Summaries Without Examples](https://aclanthology.org/2020.acl-main.460) (Laban et al., ACL 2020)
ACL
Philippe Laban, Andrew Hsi, John Canny, and Marti A. Hearst. 2020. The Summary Loop: Learning to Write Abstractive Summaries Without Examples. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5135–5150, Online. Association for Computational Linguistics.
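For readers skimming the abstract, the core coverage idea can be sketched in a few lines of Python. This is an illustrative toy, not the authors' implementation: the paper trains a fill-in (coverage) model and a separate fluency model, whereas the proxy below only checks whether masked key terms reappear in the candidate summary; the names coverage_score and top_keywords are hypothetical, not from the paper's codebase.

# Minimal sketch of masking-based coverage scoring, per the abstract:
# key terms are masked out of the document and must be recoverable from
# the generated summary. A learned fill-in model is replaced here by a
# crude membership check, and keyword selection by raw term frequency.
from collections import Counter
import re

STOPWORDS = {"the", "a", "an", "of", "in", "on", "and", "to", "is", "are",
             "for", "by", "with", "that", "this", "it", "as", "was", "were"}

def top_keywords(document: str, k: int = 10) -> list[str]:
    """Pick the k most frequent non-stopword tokens as 'key terms'."""
    tokens = re.findall(r"[a-z]+", document.lower())
    counts = Counter(t for t in tokens if t not in STOPWORDS and len(t) > 2)
    return [word for word, _ in counts.most_common(k)]

def coverage_score(document: str, summary: str, k: int = 10) -> float:
    """Fraction of masked key terms 'recovered' from the summary.

    Recovery is approximated by token membership in the summary; the
    paper's coverage model instead predicts the masked tokens from the
    summary plus the masked document.
    """
    keywords = top_keywords(document, k)
    if not keywords:
        return 0.0
    summary_tokens = set(re.findall(r"[a-z]+", summary.lower()))
    recovered = sum(1 for word in keywords if word in summary_tokens)
    return recovered / len(keywords)

if __name__ == "__main__":
    doc = ("The summary loop trains an abstractive summarizer without "
           "examples by rewarding summaries whose key terms let a coverage "
           "model fill masked words back into the document.")
    print(coverage_score(doc, "A coverage model scores masked key terms."))

In the actual summary loop, this coverage score is combined with a fluency score under a length constraint to produce the training signal, so the summarizer learns without reference summaries.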