@inproceedings{peyrard-west-2020-klearn,
    title     = {{KLearn}: Background Knowledge Inference from Summarization Data},
    author    = {Peyrard, Maxime and
                 West, Robert},
    editor    = {Cohn, Trevor and
                 He, Yulan and
                 Liu, Yang},
    booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2020},
    month     = nov,
    year      = {2020},
    address   = {Online},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2020.findings-emnlp.188/},
    doi       = {10.18653/v1/2020.findings-emnlp.188},
    pages     = {2073--2085},
    abstract  = {The goal of text summarization is to compress documents to the relevant information while excluding background information already known to the receiver. So far, summarization researchers have given considerably more attention to relevance than to background knowledge. In contrast, this work puts background knowledge in the foreground. Building on the realization that the choices made by human summarizers and annotators contain implicit information about their background knowledge, we develop and compare techniques for inferring background knowledge from summarization data. Based on this framework, we define summary scoring functions that explicitly model background knowledge, and show that these scoring functions fit human judgments significantly better than baselines. We illustrate some of the many potential applications of our framework. First, we provide insights into human information importance priors. Second, we demonstrate that averaging the background knowledge of multiple, potentially biased annotators or corpora greatly improves summary scoring performance. Finally, we discuss potential applications of our framework beyond summarization.},
}
Markdown (Informal)
[KLearn: Background Knowledge Inference from Summarization Data](https://aclanthology.org/2020.findings-emnlp.188/) (Peyrard & West, Findings 2020)
ACL