@inproceedings{wang-etal-2024-symbolic,
  title     = {Symbolic Working Memory Enhances Language Models for Complex Rule Application},
  author    = {Wang, Siyuan and
               Wei, Zhongyu and
               Choi, Yejin and
               Ren, Xiang},
  editor    = {Al-Onaizan, Yaser and
               Bansal, Mohit and
               Chen, Yun-Nung},
  booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing},
  month     = nov,
  year      = {2024},
  address   = {Miami, Florida, USA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.emnlp-main.974/},
  doi       = {10.18653/v1/2024.emnlp-main.974},
  pages     = {17583--17604},
  abstract  = {Large Language Models (LLMs) have shown remarkable reasoning performance but struggle with multi-step deductive reasoning involving a series of rule application steps, especially when rules are presented non-sequentially. Our preliminary analysis shows that while LLMs excel in single-step rule application, their performance drops significantly in multi-step scenarios due to the challenge in rule grounding. It requires anchoring the applicable rule and supporting facts at each step, amidst multiple input rules, facts, and inferred facts. To address this, we propose augmenting LLMs with external working memory and introduce a neurosymbolic framework for rule application. The memory stores facts and rules in both natural language and symbolic forms, enabling precise tracking. Utilizing this memory, our framework iteratively performs symbolic rule grounding and LLM-based rule implementation. The former matches predicates and variables of symbolic rules and facts to ground applicable rules at each step. Experiments indicate our framework's effectiveness in rule application and its robustness across various steps and settings.},
}
@comment{
Markdown (Informal)
[Symbolic Working Memory Enhances Language Models for Complex Rule Application](https://aclanthology.org/2024.emnlp-main.974/) (Wang et al., EMNLP 2024)
ACL
}