@inproceedings{xiao-etal-2022-primera,
title = "{PRIMERA}: Pyramid-based Masked Sentence Pre-training for Multi-document Summarization",
author = "Xiao, Wen and
Beltagy, Iz and
Carenini, Giuseppe and
Cohan, Arman",
editor = "Muresan, Smaranda and
Nakov, Preslav and
Villavicencio, Aline",
booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.acl-long.360/",
doi = "10.18653/v1/2022.acl-long.360",
pages = "5245--5263",
abstract = "We introduce PRIMERA, a pre-trained model for multi-document representation with a focus on summarization that reduces the need for dataset-specific architectures and large amounts of fine-tuning labeled data. PRIMERA uses our newly proposed pre-training objective designed to teach the model to connect and aggregate information across documents. It also uses efficient encoder-decoder transformers to simplify the processing of concatenated input documents. With extensive experiments on 6 multi-document summarization datasets from 3 different domains on zero-shot, few-shot and full-supervised settings, PRIMERA outperforms current state-of-the-art dataset-specific and pre-trained models on most of these settings with large margins."
}
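
The abstract describes PRIMERA as an efficient encoder-decoder model that summarizes a concatenation of the input documents. Below is a minimal usage sketch in Python with the Hugging Face transformers library; the checkpoint name "allenai/PRIMERA", the <doc-sep> separator token, and the generation settings are illustrative assumptions, not details taken from the BibTeX record above.

# Minimal sketch, assuming the PRIMERA checkpoint is published on the Hugging Face Hub
# as "allenai/PRIMERA" and that documents are joined with a <doc-sep> separator token
# (assumptions for illustration; consult the released model card for the exact usage).
import torch
from transformers import AutoTokenizer, LEDForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("allenai/PRIMERA")
model = LEDForConditionalGeneration.from_pretrained("allenai/PRIMERA")

# Concatenate the documents of one cluster into a single sequence so the
# long-input encoder-decoder can aggregate information across them.
documents = [
    "First news article about the event ...",
    "Second news article covering the same event ...",
]
input_text = " <doc-sep> ".join(documents)
inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=4096)

# LED-style models combine sparse local attention with a few global tokens;
# placing global attention on the first token is a common default.
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1

summary_ids = model.generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    global_attention_mask=global_attention_mask,
    max_length=256,
    num_beams=5,
)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])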