@inproceedings{yu-2022-evaluating,
    title     = {Evaluating Pre-Trained Language Models on Multi-Document Summarization for Literature Reviews},
    author    = {Yu, Benjamin},
    editor    = {Cohan, Arman and
                 Feigenblat, Guy and
                 Freitag, Dayne and
                 Ghosal, Tirthankar and
                 Herrmannova, Drahomira and
                 Knoth, Petr and
                 Lo, Kyle and
                 Mayr, Philipp and
                 Shmueli-Scheuer, Michal and
                 de Waard, Anita and
                 Wang, Lucy Lu},
    booktitle = {Proceedings of the Third Workshop on Scholarly Document Processing},
    month     = oct,
    year      = {2022},
    address   = {Gyeongju, Republic of Korea},
    publisher = {Association for Computational Linguistics},
    url       = {https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.sdp-1.22/},
    pages     = {188--192},
    abstract  = {Systematic literature reviews in the biomedical space are often expensive to conduct. Automation through machine learning and large language models could improve the accuracy and research outcomes from such reviews. In this study, we evaluate a pre-trained LongT5 model on the MSLR22: Multi-Document Summarization for Literature Reviews Shared Task datasets. We weren't able to make any improvements on the dataset benchmark, but we do establish some evidence that current summarization metrics are insufficient in measuring summarization accuracy. A multi-document summarization web tool was also built to demonstrate the viability of summarization models for future investigators: \url{https://ben-yu.github.io/summarizer}},
}
@comment{
Markdown (Informal):
[Evaluating Pre-Trained Language Models on Multi-Document Summarization for Literature Reviews](https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.sdp-1.22/) (Yu, sdp 2022)
Source: ACL Anthology
}