@inproceedings{dihan-etal-2025-teamb2b,
  title     = {{TeamB2B} at {BLP}-2025 Task 2: {BanglaForge}: {LLM} Collaboration with Self-Refinement for {Bangla} Code Generation},
  author    = {Dihan, Mahir Labib and
               Ahmed, Sadif and
               Rahman, Md Nafiu},
  editor    = {Alam, Firoj and
               Kar, Sudipta and
               Chowdhury, Shammur Absar and
               Hassan, Naeemul and
               Prince, Enamul Hoque and
               Tasnim, Mohiuddin and
               Rony, Md Rashad Al Hasan and
               Rahman, Md Tahmid Rahman},
  booktitle = {Proceedings of the Second Workshop on Bangla Language Processing (BLP-2025)},
  month     = dec,
  year      = {2025},
  address   = {Mumbai, India},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.banglalp-1.66/},
  pages     = {642--655},
  isbn      = {979-8-89176-314-2},
  abstract  = {Bangla is a low-resource language for code generation, lacking large-scale annotated datasets and tools to transform natural language specifications into executable programs. This makes Bangla-to-code generation a challenging task requiring innovative solutions. To address this, we introduce BanglaForge, a novel framework for generating code from Bangla function descriptions. BanglaForge leverages a retrieval-augmented dual-model collaboration paradigm with self-refinement, combining in-context learning, llm-based translation, systematic prompt engineering, and iterative self-refinement based on execution feedback, where a coder generates initial solutions and a reviewer enhances them for robustness. On the BLP-2025 Bangla Code Generation benchmark, BanglaForge achieves a competitive Pass@1 accuracy of 84.00{\%}, demonstrating the effectiveness of retrieval, model collaboration, and self-refinement for low-resource Bangla code generation.},
}
@comment{Informal Markdown citation (ACL Anthology page residue):
[TeamB2B at BLP-2025 Task 2: BanglaForge: LLM Collaboration with Self-Refinement for Bangla Code Generation](https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.banglalp-1.66/) (Dihan et al., BanglaLP 2025)
ACL
}