@inproceedings{huang-etal-2025-llms-good,
    title = "Can {LLM}s be Good Graph Judge for Knowledge Graph Construction?",
    author = "Huang, Haoyu and
      Chen, Chong and
      Sheng, Zeang and
      Li, Yang and
      Zhang, Wentao",
    editor = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.emnlp-main.554/",
    pages = "10940--10959",
    isbn = "979-8-89176-332-6",
    abstract = "In real-world scenarios, most of the data obtained from the information retrieval (IR) system is unstructured. Converting natural language sentences into structured Knowledge Graphs (KGs) remains a critical challenge. We identified three limitations with respect to existing KG construction methods: (1) There could be a large amount of noise in real-world documents, which could result in extracting messy information. (2) Naive LLMs usually extract inaccurate knowledge from some domain-specific documents. (3) Hallucination phenomenon cannot be overlooked when directly using LLMs to construct KGs. In this paper, we propose \textbf{GraphJudge}, a KG construction framework to address the aforementioned challenges. In this framework, we designed an entity-centric strategy to eliminate the noise information in the documents. And we fine-tuned a LLM as a graph judge to finally enhance the quality of generated KGs. Experiments conducted on two general and one domain-specific text-graph pair datasets demonstrate state-of-the-art performance against various baseline methods with strong generalization abilities."
}
@comment{
  Informal markdown citation:
  [Can LLMs be Good Graph Judge for Knowledge Graph Construction?](https://aclanthology.org/2025.emnlp-main.554/) (Huang et al., EMNLP 2025)
  ACL
}