@inproceedings{bao-etal-2025-surveygen,
    title = "{SurveyGen}: Quality-Aware Scientific Survey Generation with Large Language Models",
    author = "Bao, Tong and
      Nayeem, Mir Tafseer and
      Rafiei, Davood and
      Zhang, Chengzhi",
    editor = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.emnlp-main.136/",
    pages = "2712--2736",
    isbn = "979-8-89176-332-6",
    abstract = "Automatic survey generation has emerged as a key task in scientific document processing. While large language models (LLMs) have shown promise in generating survey texts, the lack of standardized evaluation datasets critically hampers rigorous assessment of their performance against human-written surveys. In this work, we present SurveyGen, a large-scale dataset comprising over 4,200 human-written surveys across diverse scientific domains, along with 242,143 cited references and extensive quality-related metadata for both the surveys and the cited papers. Leveraging this resource, we build QUAL-SG, a novel quality-aware framework for survey generation that enhances the standard Retrieval-Augmented Generation (RAG) pipeline by incorporating quality-aware indicators into literature retrieval to assess and select higher-quality source papers. Using this dataset and framework, we systematically evaluate state-of-the-art LLMs under varying levels of human involvement{---}from fully automatic generation to human-guided writing. Experimental results and human evaluations show that while semi-automatic pipelines can achieve partially competitive outcomes, fully automatic survey generation still suffers from low citation quality and limited critical analysis."
}
Markdown (Informal)
[SurveyGen: Quality-Aware Scientific Survey Generation with Large Language Models](https://aclanthology.org/2025.emnlp-main.136/) (Bao et al., EMNLP 2025)
ACL