@inproceedings{zhao-etal-2025-correcting,
  title     = {{Correcting on Graph}: Faithful Semantic Parsing over Knowledge Graphs with Large Language Models},
  author    = {Zhao, Ruilin and
               Zhao, Feng and
               Zhang, Hong},
  editor    = {Che, Wanxiang and
               Nabende, Joyce and
               Shutova, Ekaterina and
               Pilehvar, Mohammad Taher},
  booktitle = {Findings of the Association for Computational Linguistics: {ACL} 2025},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.findings-acl.280/},
  pages     = {5364--5376},
  isbn      = {979-8-89176-256-5},
  abstract  = {Complex multi-hop questions often require comprehensive retrieval and reasoning. As a result, effectively parsing such questions and establishing an efficient interaction channel between large language models (LLMs) and knowledge graphs (KGs) is essential for ensuring reliable reasoning. In this paper, we present a novel semantic parsing framework Correcting on Graph (CoG), aiming to establish faithful logical queries that connect LLMs and KGs. We first propose a structured knowledge decoding that enables the LLM to generate fact-aware logical queries during inference, while leveraging its parametric knowledge to fill in the blank intermediate entities. Then, we introduce a knowledge path correction that combines the logical query with KGs to correct hallucination entities and path deficiencies in the generated content, ensuring the reliability and comprehensiveness of the retrieved knowledge. Extensive experiments demonstrate that CoG outperforms the state-of-the-art KGQA methods on two knowledge-intensive question answering benchmarks. CoG achieves a high answer hit rate and exhibits competitive F1 performance for complex multi-hop questions.},
}
@comment{Markdown (informal) citation, as provided by the ACL Anthology:
  [Correcting on Graph: Faithful Semantic Parsing over Knowledge Graphs with Large Language Models](https://aclanthology.org/2025.findings-acl.280/) (Zhao et al., Findings of ACL 2025)
}