@inproceedings{xiao-etal-2025-csr,
  title     = {{CSR}-Bench: Benchmarking {LLM} Agents in Deployment of Computer Science Research Repositories},
  author    = {Xiao, Yijia and Wang, Runhui and Kong, Luyang and Golac, Davor and Wang, Wei},
  editor    = {Chiruzzo, Luis and Ritter, Alan and Wang, Lu},
  booktitle = {Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)},
  month     = apr,
  year      = {2025},
  address   = {Albuquerque, New Mexico},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.naacl-long.633/},
  pages     = {12705--12723},
  isbn      = {979-8-89176-189-6},
  abstract  = {The increasing complexity of computer science research projects demands more effective tools for deploying code repositories. Large Language Models (LLMs), such as Anthropic Claude and Meta Llama, have demonstrated significant advancements across various fields of computer science research, including the automation of diverse software engineering tasks. To evaluate the effectiveness of LLMs in handling complex code development tasks of research projects, particularly for NLP/CV/AI/ML/DM topics, we introduce CSR-Bench, a benchmark for Computer Science Research projects. This benchmark assesses LLMs from various aspects including accuracy, efficiency, and deployment script quality, aiming to explore their potential in conducting computer science research autonomously. We also introduce a novel framework, CSR-Agents, that utilizes multiple LLM agents to automate the deployment of GitHub code repositories of computer science research projects. Specifically, by checking instructions from markdown files and interpreting repository structures, the model generates and iteratively improves bash commands that set up the experimental environments and deploy the code to conduct research tasks. Preliminary results from CSR-Bench indicate that LLM agents can significantly enhance the workflow of repository deployment, thereby boosting developer productivity and improving the management of developmental workflows.},
}
Markdown (Informal)
[CSR-Bench: Benchmarking LLM Agents in Deployment of Computer Science Research Repositories](https://aclanthology.org/2025.naacl-long.633/) (Xiao et al., NAACL 2025)
ACL