@inproceedings{jin-etal-2025-verilocc,
title = "{V}eri{L}occ: End-to-End Cross-Architecture Register Allocation via {LLM}",
author = "Jin, Lesheng and
Ruan, Zhenyuan and
Mai, Haohui and
Shang, Jingbo",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.emnlp-main.1538/",
doi = "10.18653/v1/2025.emnlp-main.1538",
pages = "30240--30250",
ISBN = "979-8-89176-332-6",
abstract = "Modern GPUs evolve rapidly, yet production compilers still rely on hand-crafted register allocation heuristics that require substantial re-tuning for each hardware generation. We introduce VeriLocc, a framework that combines large language models (LLMs) with formal compiler techniques to enable generalizable and verifiable register allocation across GPU architectures. VeriLocc fine-tunes an LLM to translate intermediate representations (MIRs) into target-specific register assignments, aided by static analysis for cross-architecture normalization and generalization and a verifier-guided regeneration loop to ensure correctness. Evaluated on matrix multiplication (GEMM) and multi-head attention (MHA), VeriLocc achieves 85{--}99{\%} single-shot accuracy and near-100{\%} pass@100. Case study shows that VeriLocc discovers more performant assignments than expert-tuned libraries, outperforming rocBLAS by over 10{\%} in runtime."
}