@inproceedings{zhang-etal-2025-coderag,
    title = "{CodeRAG}: Finding Relevant and Necessary Knowledge for Retrieval-Augmented Repository-Level Code Completion",
    author = "Zhang, Sheng and
      Ding, Yifan and
      Lian, Shuquan and
      Song, Shun and
      Li, Hui",
    editor = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1187/",
    internal-note = "NOTE(review): url is a pre-ingestion preview link; presumably the canonical form is https://aclanthology.org/2025.emnlp-main.1187/ once the EMNLP 2025 ingest goes live -- confirm and update; add doi if minted",
    pages = "23289--23299",
    isbn = "979-8-89176-332-6",
    abstract = "Repository-level code completion automatically predicts the unfinished code based on the broader information from the repository. Recent strides in Code Large Language Models (code LLMs) have spurred the development of repository-level code completion methods, yielding promising results. Nevertheless, they suffer from issues such as inappropriate query construction, single-path code retrieval, and misalignment between code retriever and code LLM. To address these problems, we introduce CodeRAG, a framework tailored to identify relevant and necessary knowledge for retrieval-augmented repository-level code completion. Its core components include log probability guided query construction, multi-path code retrieval, and preference-aligned BestFit reranking. Extensive experiments on benchmarks ReccEval and CCEval demonstrate that CodeRAG significantly and consistently outperforms state-of-the-art methods. The implementation of CodeRAG is available at https://github.com/KDEGroup/CodeRAG."
}
Markdown (Informal)
[CodeRAG: Finding Relevant and Necessary Knowledge for Retrieval-Augmented Repository-Level Code Completion](https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1187/) (Zhang et al., EMNLP 2025)
ACL