@inproceedings{li-etal-2025-encouraging,
    title = "Encouraging Good Processes Without the Need for Good Answers: Reinforcement Learning for {LLM} Agent Planning",
    author = "Li, Zhiwei and
      Hu, Yong and
      Wang, Wenqing",
    editor = "Potdar, Saloni and
      Rojas-Barahona, Lina and
      Montella, Sebastien",
    booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing: Industry Track",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.emnlp-industry.116/",
    pages = "1654--1666",
    isbn = "979-8-89176-333-3",
    abstract = "The functionality of Large Language Model (LLM) agents is primarily determined by two capabilities: action planning and answer summarization. The former, action planning, is the core capability that dictates an agent{'}s performance. However, prevailing training paradigms employ end-to-end, multi-objective optimization that jointly trains both capabilities. This paradigm faces two critical challenges: imbalanced optimization objective allocation and scarcity of verifiable data, making it difficult to enhance the agent{'}s planning capability. To address these challenges, we propose Reinforcement Learning with Tool-use Rewards (RLTR), a novel framework that decouples the training process to enable a focused, single-objective optimization of the planning module. Crucially, RLTR introduces a reward signal based on tool-use completeness to directly evaluate the quality of tool invocation sequences. This method offers a more direct and reliable training signal than assessing the final response content, thereby obviating the need for verifiable data. Our experiments demonstrate that RLTR achieves an 8{\%}{--}12{\%} improvement in planning performance compared to end-to-end baselines. Moreover, this enhanced planning capability, in turn, translates to a 5{\%}{--}6{\%} increase in the final response quality of the overall agent system.",
}
@comment{Markdown (Informal) citation from the ACL Anthology page:
[Encouraging Good Processes Without the Need for Good Answers: Reinforcement Learning for LLM Agent Planning](https://aclanthology.org/2025.emnlp-industry.116/) (Li et al., EMNLP 2025)
ACL}