@inproceedings{rentschler-roberts-2025-rl,
    % "Transformer" is brace-protected (like {RL}) so sentence-casing styles keep its capital.
    title = "{RL} + {Transformer} = A General-Purpose Problem Solver",
    author = "Rentschler, Micah and
      Roberts, Jesse",
    editor = "Kamalloo, Ehsan and
      Gontier, Nicolas and
      Lu, Xing Han and
      Dziri, Nouha and
      Murty, Shikhar and
      Lacoste, Alexandre",
    booktitle = "Proceedings of the 1st Workshop for Research on Agent Language Models (REALM 2025)",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    % Canonical ACL Anthology URL (the previous value pointed at an ephemeral
    % preview/ingestion host); derived from the Anthology ID in the DOI below.
    url = "https://aclanthology.org/2025.realm-1.29/",
    doi = "10.18653/v1/2025.realm-1.29",
    pages = "401--410",
    % field name lowercased for consistency with the rest of the entry
    isbn = "979-8-89176-264-0",
    abstract = "What if artificial intelligence could not only solve problems for which it was trained but also teach itself to tackle novel tasks? In this paper, we finetune Llama 3.1 using reinforcement learning on the grid-world game Frozen Lake and investigate its ability to solve maps it has never encountered{---}a phenomenon recently termed In-Context Reinforcement Learning (ICRL). Without additional training, the transformer demonstrates the capacity to adapt to both in-distribution and out-of-distribution environment parameterizations. Moreover, it remains effective when trained on data that blends optimal and suboptimal behavior, combines strategies from its context (behavior-stitching), and dynamically adapts to non-stationary environments. These proof-of-concept findings suggest that in-context learning via reinforcement-tuned transformers may form the basis of a promising general-purpose problem-solver."
}
Markdown (Informal)
[RL + Transformer = A General-Purpose Problem Solver](https://aclanthology.org/2025.realm-1.29/) (Rentschler & Roberts, REALM 2025)
ACL
- Micah Rentschler and Jesse Roberts. 2025. RL + Transformer = A General-Purpose Problem Solver. In Proceedings of the 1st Workshop for Research on Agent Language Models (REALM 2025), pages 401–410, Vienna, Austria. Association for Computational Linguistics.