@inproceedings{alazraki-rei-2025-meta,
title = "Meta-Reasoning Improves Tool Use in Large Language Models",
author = "Alazraki, Lisa and
Rei, Marek",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Ingest-2025-COMPUTEL/2025.findings-naacl.440/",
pages = "7885--7897",
ISBN = "979-8-89176-195-7",
abstract = "External tools help large language models succeed at tasks where they would otherwise typically fail. In existing frameworks, choosing tools at test time relies on naive greedy decoding, regardless of whether the model has been fine-tuned on tool-annotated data or prompted with in-context examples. In contrast, we find that gathering and choosing among a suitable set of candidate tools has greater potential to lead to an optimal selection. We present Tool selECTion via meta-reasONing (TECTON), a two-phase system that first *reasons* over a task and outputs candidate tools using a custom fine-tuned language modelling head. Then, with the custom head disabled, it *meta-reasons* (i.e., it reasons over the previous reasoning process) to make a final choice. We show that TECTON results in substantial gains{---}both in-distribution and out-of-distribution{---}on a range of math reasoning datasets."
}