@inproceedings{ishibashi-etal-2025-large,
    title = "Can Large Language Models Invent Algorithms to Improve Themselves?: Algorithm Discovery for Recursive Self-Improvement through Reinforcement Learning",
    author = "Ishibashi, Yoichi and
      Yano, Taro and
      Oyamada, Masafumi",
    editor = "Chiruzzo, Luis and
      Ritter, Alan and
      Wang, Lu",
    booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
    month = apr,
    year = "2025",
    address = "Albuquerque, New Mexico",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.naacl-long.519/",
    doi = "10.18653/v1/2025.naacl-long.519",
    pages = "10332--10363",
    isbn = "979-8-89176-189-6",
    abstract = "Large Language Models (LLMs) have shown remarkable performance improvements and are rapidly gaining adoption in industry. However, the methods for improving LLMs are still designed by humans, which restricts the invention of new model-improving algorithms to human expertise and imagination. To address this, we propose the \textit{Self-Developing} framework, which enables LLMs to autonomously generate and learn model-improvement algorithms. In this framework, the seed model generates, applies, and learns model-improving algorithms, continuously improving both the seed model and the algorithms themselves. Among model-improving strategies, we focus on model merging algorithms. In mathematical reasoning tasks, Self-Developing discovers novel merging strategies and outperforms human-designed methods. On GSM8k, the discovered algorithms improve the seed model by 6{\%} and surpass human-designed methods by 4.3{\%}. Moreover, they exhibit strong transferability, achieving a 7.4{\%} performance gain on out-of-domain models. These results suggest that LLMs can autonomously develop effective model-improvement techniques beyond human intuition."
}
[Can Large Language Models Invent Algorithms to Improve Themselves?: Algorithm Discovery for Recursive Self-Improvement through Reinforcement Learning](https://aclanthology.org/2025.naacl-long.519/) (Ishibashi et al., NAACL 2025)
ACL