@inproceedings{zhu-etal-2025-language-models,
title = "Language Models Can Infer Action Semantics for Symbolic Planners from Environment Feedback",
author = "Zhu, Wang Bill and
Singh, Ishika and
Jia, Robin and
Thomason, Jesse",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Ingest-2025-COMPUTEL/2025.naacl-long.440/",
pages = "8751--8773",
ISBN = "979-8-89176-189-6",
abstract = "Symbolic planners can discover a sequence of actions from initial to goal states given expert-defined, domain-specific logical action semantics. Large Language Models (LLMs) can directly generate such sequences, but limitations in reasoning and state-tracking often result in plans that are insufficient or unexecutable. We propose Predicting Semantics of Actions with Language Models (PSALM), which automatically learns action semantics by leveraging the strengths of both symbolic planners and LLMs. PSALM repeatedly proposes and executes plans, using the LLM to partially generate plans and to infer domain-specific action semantics based on execution outcomes. PSALM maintains a belief over possible action semantics that is iteratively updated until a goal state is reached. Experiments on 7 environments show that when learning just from one goal, PSALM boosts plan success rate from 36.4{\%} (on Claude-3.5) to 100{\%}, and explores the environment more efficiently than prior work to infer ground truth domain action semantics."
}
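
The abstract above describes PSALM as an iterative loop: plan with the current belief about action semantics, execute, and let the LLM revise that belief from execution feedback until the goal is reached. Below is a minimal Python sketch of that loop as summarized in the abstract only; every helper name (`planner`, `llm`, `env`, `infer_action_semantics`, etc.) is a hypothetical stand-in, not the authors' actual implementation or API.

```python
# Hypothetical sketch of the PSALM loop described in the abstract.
# The planner, llm, and env objects are assumed interfaces, not real libraries.

def psalm_loop(env, llm, planner, max_iters=50):
    """Iteratively infer action semantics until a plan reaches the goal state."""
    belief = {}  # belief over each action's semantics (e.g., preconditions/effects)

    for _ in range(max_iters):
        # Plan symbolically with the current (possibly incorrect) semantics;
        # fall back to the LLM to partially generate a plan when needed.
        plan = planner.plan(env.initial_state, env.goal, belief)
        if plan is None:
            plan = llm.propose_partial_plan(env.description, belief)

        # Execute the plan and collect environment feedback.
        outcome = env.execute(plan)
        if outcome.reached_goal:
            return belief, plan

        # Use the LLM to infer revised action semantics from the execution
        # trace, and update the maintained belief.
        belief.update(llm.infer_action_semantics(plan, outcome.trace, belief))

    return belief, None
```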