@inproceedings{ni-etal-2025-viscoder,
title = "{V}is{C}oder: Fine-Tuning {LLM}s for Executable Python Visualization Code Generation",
author = "Ni, Yuansheng and
Nie, Ping and
Zou, Kai and
Yue, Xiang and
Chen, Wenhu",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.160/",
doi = "10.18653/v1/2025.findings-emnlp.160",
pages = "2956--2983",
ISBN = "979-8-89176-335-7",
abstract = "Large language models (LLMs) often struggle with visualization tasks like plotting diagrams, charts, where success depends on both code correctness and visual semantics. Existing instruction-tuning datasets lack execution-grounded supervision and offer limited support for iterative code correction, resulting in fragile and unreliable plot generation. We present **VisCode-200K**, a large-scale instruction tuning dataset for Python-based visualization and self-correction. It contains over 200K examples from two sources: (1) validated plotting code from open-source repositories, paired with natural language instructions and rendered plots; and (2) 45K multi-turn correction dialogues from Code-Feedback, enabling models to revise faulty code using runtime feedback. We fine-tune Qwen2.5-Coder-Instruct on VisCode-200K to create **VisCoder**, and evaluate it on PandasPlotBench. VisCoder significantly outperforms strong open-source baselines and approaches the performance of proprietary models like GPT-4o-mini. We further adopt a self-debug evaluation protocol to assess iterative repair, demonstrating the benefits of feedback-driven learning for executable, visually accurate code generation."
}