@inproceedings{kim-etal-2025-hello,
    title = "`Hello, World!': Making {GNN}s Talk with {LLM}s",
    author = "Kim, Sunwoo and
      Lee, Soo Yong and
      Yoo, Jaemin and
      Shin, Kijung",
    editor = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-emnlp.555/",
    doi = "10.18653/v1/2025.findings-emnlp.555",
    pages = "10508--10526",
    isbn = "979-8-89176-335-7",
    abstract = "While graph neural networks (GNNs) have shown remarkable performance across diverse graph-related tasks, their high-dimensional hidden representations render them black boxes. In this work, we propose Graph Lingual Network (GLN), a GNN built on large language models (LLMs), with hidden representations in the form of human-readable text. Through careful prompt design, GLN incorporates not only the message passing module of GNNs but also advanced GNN techniques, including graph attention and initial residual connection. The comprehensibility of GLN{'}s hidden representations enables an intuitive analysis of how node representations change (1) across layers and (2) under advanced GNN techniques, shedding light on the inner workings of GNNs. Furthermore, we demonstrate that GLN achieves strong zero-shot performance on node classification and link prediction, outperforming existing LLM-based baseline methods."
}

Markdown (Informal)
[‘Hello, World!’: Making GNNs Talk with LLMs](https://aclanthology.org/2025.findings-emnlp.555/) (Kim et al., Findings 2025)
ACL
- Sunwoo Kim, Soo Yong Lee, Jaemin Yoo, and Kijung Shin. 2025. ‘Hello, World!’: Making GNNs Talk with LLMs. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 10508–10526, Suzhou, China. Association for Computational Linguistics.