@inproceedings{soleimani-etal-2023-nonfacts,
title = "{N}on{F}act{S}: {N}on{F}actual Summary Generation for Factuality Evaluation in Document Summarization",
author = "Soleimani, Amir and
Monz, Christof and
Worring, Marcel",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.findings-acl.400/",
doi = "10.18653/v1/2023.findings-acl.400",
pages = "6405--6419",
abstract = "Pre-trained abstractive summarization models can generate fluent summaries and achieve high ROUGE scores. Previous research has found that these models often generate summaries that are inconsistent with their context document and contain nonfactual information. To evaluate factuality in document summarization, a document-level Natural Language Inference (NLI) classifier can be used. However, training such a classifier requires large-scale high-quality factual and nonfactual samples. To that end, we introduce NonFactS, a data generation model, to synthesize nonfactual summaries given a context document and a human-annotated (reference) factual summary. Compared to previous methods, our nonfactual samples are more abstractive and more similar to their corresponding factual samples, resulting in state-of-the-art performance on two factuality evaluation benchmarks, FALSESUM and SUMMAC. Our experiments demonstrate that even without human-annotated summaries, NonFactS can use random sentences to generate nonfactual summaries and a classifier trained on these samples generalizes to out-of-domain documents."
}
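
The abstract describes scoring factuality by treating the context document as an NLI premise and the summary as the hypothesis. Below is a minimal sketch of that setup, assuming the Hugging Face `transformers` library and the generic off-the-shelf `roberta-large-mnli` checkpoint rather than the paper's classifier trained on NonFactS-generated samples; the example document and summaries are hypothetical.

```python
# Minimal sketch (not the authors' NonFactS code): document-level NLI
# factuality scoring with an off-the-shelf MNLI model. Assumes the
# Hugging Face `transformers` library and PyTorch are installed.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# roberta-large-mnli label ids: 0=CONTRADICTION, 1=NEUTRAL, 2=ENTAILMENT
MODEL_NAME = "roberta-large-mnli"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
model.eval()

def factuality_score(document: str, summary: str) -> float:
    """Entailment probability of the summary given the document (premise)."""
    inputs = tokenizer(document, summary, truncation=True, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.softmax(logits, dim=-1)[0]
    return probs[2].item()  # probability of the ENTAILMENT class

# Hypothetical example: a consistent vs. a nonfactual summary.
doc = "The company reported record profits in 2022 and hired 500 new employees."
print(factuality_score(doc, "The company hired 500 people."))  # expected: higher
print(factuality_score(doc, "The company fired 500 people."))  # expected: lower
```

A model trained on harder negatives, such as the abstractive nonfactual summaries NonFactS synthesizes, would be expected to separate these two cases more reliably than a sentence-level MNLI model applied off the shelf.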