@inproceedings{li-etal-2025-unlocking,
title = "Unlocking the Planning Capabilities of Large Language Models with Maximum Diversity Fine-tuning",
author = "Li, Wenjun and
Chen, Changyu and
Varakantham, Pradeep",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.findings-naacl.183/",
pages = "3318--3340",
ISBN = "979-8-89176-195-7",
abstract = "Large language models (LLMs) have demonstrated impressive task-solving capabilities through prompting techniques and system designs, including solving planning tasks (e.g., math proofs, basic travel planning) when sufficient data is available online and used during pre-training. However, for planning tasks with limited prior data (e.g., blocks world, advanced travel planning), the performance of LLMs, including proprietary models like GPT and Gemini, is poor. This paper investigates the impact of fine-tuning on the planning capabilities of LLMs, revealing that LLMs can achieve strong performance in planning through substantial (tens of thousands of specific examples) fine-tuning. Yet, this process incurs high economic, time, and computational costs for each planning problem variation. To address this, we propose Clustering-Based Maximum Diversity Sampling (CMDS), which selects diverse and representative data to enhance sample efficiency and the model{'}s generalization capability. Extensive evaluations demonstrate that CMDS-$l$, a baseline method combining CMDS with language embeddings, outperforms random sampling. Furthermore, we introduce a novel algorithm, CMDS-$g$, which encodes planning task instances with their graph representations into the embedding space. Empirical results show that CMDS-$g$ consistently outperforms baseline methods across various scales and multiple benchmark domains."
}
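
The abstract describes CMDS only at a high level (selecting diverse, representative examples by clustering in an embedding space). The snippet below is a minimal, illustrative sketch of generic clustering-based diverse subset selection (k-means centroids, nearest-to-centroid picks), not the paper's algorithm; the embedding step is stubbed out and the budget of 32 is an arbitrary assumption.

```python
# Illustrative sketch of clustering-based diverse sampling (not the paper's CMDS):
# embed candidate fine-tuning examples, cluster them with k-means, and keep the
# example closest to each centroid as a diverse, representative subset.
import numpy as np
from sklearn.cluster import KMeans


def diverse_sample(embeddings: np.ndarray, budget: int, seed: int = 0) -> list[int]:
    """Return indices of `budget` examples, one nearest to each k-means centroid."""
    km = KMeans(n_clusters=budget, n_init=10, random_state=seed).fit(embeddings)
    picked = []
    for centroid in km.cluster_centers_:
        dists = np.linalg.norm(embeddings - centroid, axis=1)
        picked.append(int(np.argmin(dists)))
    return picked


if __name__ == "__main__":
    # Placeholder embeddings; in practice these would come from a language or
    # graph encoder over the planning-task instances, per the abstract.
    rng = np.random.default_rng(0)
    emb = rng.normal(size=(1000, 64))
    subset = diverse_sample(emb, budget=32)
    print(f"selected {len(subset)} examples for fine-tuning")
```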