@inproceedings{wang-etal-2023-llm4vis,
  title     = {{LLM4Vis}: Explainable Visualization Recommendation using {ChatGPT}},
  author    = {Wang, Lei and
               Zhang, Songheng and
               Wang, Yun and
               Lim, Ee-Peng and
               Wang, Yong},
  editor    = {Wang, Mingxuan and
               Zitouni, Imed},
  booktitle = {Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track},
  month     = dec,
  year      = {2023},
  address   = {Singapore},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.emnlp-industry.64/},
  doi       = {10.18653/v1/2023.emnlp-industry.64},
  pages     = {675--692},
  abstract  = {Data visualization is a powerful tool for exploring and communicating insights in various domains. To automate visualization choice for datasets, a task known as visualization recommendation has been proposed. Various machine-learning-based approaches have been developed for this purpose, but they often require a large corpus of dataset-visualization pairs for training and lack natural explanations for their results. To address this research gap, we propose LLM4Vis, a novel ChatGPT-based prompting approach to perform visualization recommendation and return human-like explanations using very few demonstration examples. Our approach involves feature description, demonstration example selection, explanation generation, demonstration example construction, and inference steps. To obtain demonstration examples with high-quality explanations, we propose a new explanation generation bootstrapping to iteratively refine generated explanations by considering the previous generation and template-based hint. Evaluations on the VizML dataset show that LLM4Vis outperforms or performs similarly to supervised learning models like Random Forest, Decision Tree, and MLP, in both few-shot and zero-shot settings. The qualitative evaluation also shows the effectiveness of explanations generated by LLM4Vis.},
}
@comment{
  Markdown (Informal):
  [LLM4Vis: Explainable Visualization Recommendation using ChatGPT](https://aclanthology.org/2023.emnlp-industry.64/) (Wang et al., EMNLP 2023)
  ACL
}