@inproceedings{liu-etal-2025-sync,
title = "{S}yn{C}-{LLM}: Generation of Large-Scale Synthetic Circuit Code with Hierarchical Language Models",
author = "Liu, Shang and
Lu, Yao and
Fang, Wenji and
Wang, Jing and
Xie, Zhiyao",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.877/",
pages = "17361--17376",
ISBN = "979-8-89176-332-6",
abstract = "In recent years, AI-assisted integrated circuit (IC) design methods have shown great potential in boosting IC design efficiency. However, this emerging technique is fundamentally limited by the serious scarcity of publicly accessible large-scale circuit design data, which are mostly private IPs owned by semiconductor companies. In this work, we propose SynC-LLM, the first technique that exploits LLM{'}s ability to generate new large-scale synthetic digital circuits. In our hierarchical circuit generation process, we first design a directed graph diffusion model to learn and generate the skeleton of large circuits with sequential cells. Then we propose a cone function retrieval technique to annotate each sequential node in the skeleton with a function description. Finally, we apply a level-by-level customized prompting technique utilizing LLM to complete the code at every skeleton cone. Experiments show that our generated circuits are not only valid and fully functional, but also closely resemble realistic large-scale designs and can significantly improve AI models' performance in multiple IC design tasks. The code and data are open-sourced in https://github.com/hkust-zhiyao/SynCircuitData."
}