% ACL 2025 main conference paper (Volume 1: Long Papers).
% NOTE(review): url normalised from the temporary ingestion-preview host to the
% canonical aclanthology.org form, which is the stable permalink for this ID.
@inproceedings{li-etal-2025-gift,
    title     = {{GiFT}: {Gibbs} Fine-Tuning for Code Generation},
    author    = {Li, Haochen and
                 Feng, Wanjin and
                 Zhou, Xin and
                 Shen, Zhiqi},
    editor    = {Che, Wanxiang and
                 Nabende, Joyce and
                 Shutova, Ekaterina and
                 Pilehvar, Mohammad Taher},
    booktitle = {Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
    month     = jul,
    year      = {2025},
    address   = {Vienna, Austria},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.acl-long.599/},
    pages     = {12271--12284},
    isbn      = {979-8-89176-251-0},
    abstract  = {Training Large Language Models (LLMs) with synthetic data is a prevalent practice in code generation. A key approach is self-training, where LLMs are iteratively trained on self-generated correct code snippets. In this case, the self-generated codes are drawn from a conditional distribution, conditioned on a specific seed description. However, the seed description is not the only valid representation that aligns with its intended meaning. With all valid descriptions and codes forming a joint space, codes drawn from the conditional distribution would lead to an underrepresentation of the full description-code space. As such, we propose Gibbs Fine-Tuning (GiFT), a novel self-training method inspired by Gibbs sampling. GiFT allows self-generated data to be drawn from the marginal distribution of the joint space, thereby mitigating the biases inherent in conditional sampling. We provide a theoretical analysis demonstrating the potential benefits of fine-tuning LLMs with code derived from the marginal distribution. Furthermore, we propose a perplexity-based code selection method to mitigate the imbalanced long-tail distribution of the self-generated codes. Empirical evaluation of two LLMs across four datasets demonstrates that GiFT achieves superior performance, particularly on more challenging benchmarks. Source code is available at \url{https://github.com/Alex-HaochenLi/GiFT}.},
}
Markdown (Informal)
[GiFT: Gibbs Fine-Tuning for Code Generation](https://aclanthology.org/2025.acl-long.599/) (Li et al., ACL 2025)
ACL
- Haochen Li, Wanjin Feng, Xin Zhou, and Zhiqi Shen. 2025. GiFT: Gibbs Fine-Tuning for Code Generation. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 12271–12284, Vienna, Austria. Association for Computational Linguistics.