@inproceedings{wu-etal-2025-exploring,
  title     = "Exploring the Potential of Large Language Models for Heterophilic Graphs",
  author    = "Wu, Yuxia and
               Li, Shujie and
               Fang, Yuan and
               Shi, Chuan",
  editor    = "Chiruzzo, Luis and
               Ritter, Alan and
               Wang, Lu",
  booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
  month     = apr,
  year      = "2025",
  address   = "Albuquerque, New Mexico",
  publisher = "Association for Computational Linguistics",
  url       = "https://aclanthology.org/2025.naacl-long.269/",
  pages     = "5198--5211",
  isbn      = "979-8-89176-189-6",
  abstract  = "Large language models (LLMs) have presented significant opportunities to enhance various machine learning applications, including graph neural networks (GNNs). By leveraging the vast open-world knowledge within LLMs, we can more effectively interpret and utilize textual data to better characterize heterophilic graphs, where neighboring nodes often have different labels. However, existing approaches for heterophilic graphs overlook the rich textual data associated with nodes, which could unlock deeper insights into their heterophilic contexts. In this work, we explore the potential of LLMs for modeling heterophilic graphs and propose a novel two-stage framework: LLM-enhanced edge discriminator and LLM-guided edge reweighting. In the first stage, we fine-tune the LLM to better identify homophilic and heterophilic edges based on the textual content of their nodes. In the second stage, we adaptively manage message propagation in GNNs for different edge types based on node features, structures, and heterophilic or homophilic characteristics. To cope with the computational demands when deploying LLMs in practical scenarios, we further explore model distillation techniques to fine-tune smaller, more efficient models that maintain competitive performance. Extensive experiments validate the effectiveness of our framework, demonstrating the feasibility of using LLMs to enhance node classification on heterophilic graphs."
}
Markdown (Informal)
[Exploring the Potential of Large Language Models for Heterophilic Graphs](https://aclanthology.org/2025.naacl-long.269/) (Wu et al., NAACL 2025)
ACL
- Yuxia Wu, Shujie Li, Yuan Fang, and Chuan Shi. 2025. Exploring the Potential of Large Language Models for Heterophilic Graphs. In Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 5198–5211, Albuquerque, New Mexico. Association for Computational Linguistics.