@inproceedings{riyad-junaed-2025-adversaryai,
title = "{A}dversary{AI} at {BLP}-2025 Task 2: A Think, Refine, and Generate ({T}ri{G}en) System with {L}o{RA} and Self-Refinement for Code Generation",
author = "Riyad, Omar Faruqe and
Junaed, Jahedul Alam",
editor = "Alam, Firoj and
Kar, Sudipta and
Chowdhury, Shammur Absar and
Hassan, Naeemul and
Prince, Enamul Hoque and
Tasnim, Mohiuddin and
Rony, Md Rashad Al Hasan and
Rahman, Md Tahmid Rahman",
booktitle = "Proceedings of the Second Workshop on Bangla Language Processing (BLP-2025)",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.banglalp-1.65/",
pages = "629--641",
ISBN = "979-8-89176-314-2",
abstract = "In this paper, we propose a system for generating Python code from Bangla prompts. Our approach fine-tunes open-source models with parameter-efficient techniques and leverages proprietary models via prompting. To enhance the reasoning of smaller models, we adopt a Chain-of-Thought (CoT) augmented fine-tuning, enabling them to learn intermediate reasoning steps before generating code. A self-refinement loop further improves performance by iteratively critiquing and correcting code based on execution feedback. We also employ few-shot prompting to guide inference more effectively. Applied to both open-source and proprietary models, this pipeline achieved its best results with Gemini 2.5 Pro, where our system ranked 4th on the competition leaderboard with a Pass@1 score of 0.85. We conclude with a detailed analysis of these findings."
}