@inproceedings{hu-etal-2025-ltrag,
title = "{LTRAG}: Enhancing Autoformalization and Self-refinement for Logical Reasoning with Thought-Guided {RAG}",
author = "Hu, Ruikang and
Lin, Shaoyu and
Xiu, Yeliang and
Liu, Yongmei",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/display_plenaries/2025.findings-acl.126/",
pages = "2483--2493",
ISBN = "979-8-89176-256-5",
abstract = "Logical reasoning is fundamental to intelligent systems. Large language models (LLMs) have demonstrated promise in natural language (NL) reasoning, especially with techniques like chain-of-thought (CoT) prompting. Neuro-symbolic methods like Logic-LM and LINC further enhance performance on challenging datasets FOLIO and AR-LSAT by integrating formalization with LLMs and symbolic solvers, and possibly refinement with LLMs. However, these methods still struggle with the accurate formalization of complex NL problems.In this paper, we introduce LTRAG, a framework to enhance autoformalization and self-refinement for logical reasoning with Retrieval-Augmented Generation (RAG), by building knowledge bases of thought-guided examples (https://github.com/sysulic/LTRAG ).Experimental results on FOLIO and AR-LSAT show that LTRAG consistently outperforms Logic-LM and LINC across different models. On GPT-4 and AR-LSAT, it achieves an accuracy gain of 13{\%} over Logic-LM."
}
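
What follows is a minimal, hypothetical Python sketch of the thought-guided RAG idea summarized in the abstract: retrieve stored examples that pair an NL problem with a guiding thought and its formalization, then assemble them into a few-shot formalization prompt. The example knowledge base, the word-overlap retriever, and the prompt layout are illustrative assumptions only, not the LTRAG implementation (see https://github.com/sysulic/LTRAG for the authors' code).

# Illustrative sketch only: a naive approximation of thought-guided RAG for
# autoformalization. The knowledge base, retriever, and prompt format below
# are assumptions, not the LTRAG implementation.

from dataclasses import dataclass


@dataclass
class ThoughtExample:
    problem: str        # natural-language statement
    thought: str        # intermediate reasoning that guided the formalization
    formalization: str  # first-order logic translation


# Hypothetical knowledge base of thought-guided examples.
KB = [
    ThoughtExample(
        problem="All birds can fly. Tweety is a bird.",
        thought="Universally quantify over birds, then assert that Tweety is a bird.",
        formalization="forall x (Bird(x) -> CanFly(x)); Bird(tweety)",
    ),
]


def retrieve(query: str, kb: list, k: int = 2) -> list:
    """Rank examples by naive word overlap with the query (a stand-in for a real retriever)."""
    q = set(query.lower().split())
    scored = sorted(kb, key=lambda ex: len(q & set(ex.problem.lower().split())), reverse=True)
    return scored[:k]


def build_formalization_prompt(query: str, kb: list) -> str:
    """Assemble a few-shot prompt whose examples pair thoughts with formalizations."""
    parts = []
    for ex in retrieve(query, kb):
        parts.append(f"Problem: {ex.problem}\nThought: {ex.thought}\nFOL: {ex.formalization}")
    parts.append(f"Problem: {query}\nThought:")
    return "\n\n".join(parts)


if __name__ == "__main__":
    print(build_formalization_prompt("Every student passed. Alice is a student.", KB))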