@inproceedings{qian-etal-2024-toolink,
title = "Toolink: Linking Toolkit Creation and Using through Chain-of-Solving on Open-Source Model",
author = "Qian, Cheng and
Xiong, Chenyan and
Liu, Zhenghao and
Liu, Zhiyuan",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.naacl-long.48/",
doi = "10.18653/v1/2024.naacl-long.48",
pages = "831--854",
abstract = "Large Language Models (LLMs) have demonstrated remarkable progress in utilizing tools, but their closed-source nature and high inference costs pose limitations on their adaptability, necessitating a valid method that leverages smaller, open-sourced models. In this paper, we introduce Toolink, a comprehensive framework that performs task-solving by first creating a toolkit and then integrating the planning and calling of tools through a chain-of-solving (CoS) approach. We first validate the efficacy of Toolink in harnessing the model{'}s creativity and CoS ability on ChatGPT. Subsequently, we curate CoS-GPT, a chain-of-solving dataset designed for tool-using, and finetune the LLaMA-7B model. It results in LLaMA-CoS, a powerful open-source model with advanced tool-planning and tool-calling capabilities. Evaluation of diverse tasks from BIG-bench demonstrates its CoS ability matches that of ChatGPT while its performance surpasses the chain-of-thought approach. Further studies highlight the generalization of LLaMA-CoS to unseen tasks and showcase its capability in using toolkits not explicitly tailored for the target task, affirming its robustness in real-world scenarios. All codes and data are released."
}