@inproceedings{sheth-etal-2025-causalgraph2llm,
  title     = {{C}ausal{G}raph2{LLM}: Evaluating {LLM}s for Causal Queries},
  author    = {Sheth, Ivaxi and
               Fatemi, Bahare and
               Fritz, Mario},
  editor    = {Chiruzzo, Luis and
               Ritter, Alan and
               Wang, Lu},
  booktitle = {Findings of the Association for Computational Linguistics: {NAACL} 2025},
  month     = apr,
  year      = {2025},
  address   = {Albuquerque, New Mexico},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.findings-naacl.110/},
  doi       = {10.18653/v1/2025.findings-naacl.110},
  pages     = {2076--2098},
  isbn      = {979-8-89176-195-7},
  abstract  = {Causality is essential in scientific research, enabling researchers to interpret true relationships between variables. These causal relationships are often represented by causal graphs, which are directed acyclic graphs. With the recent advancements in Large Language Models (LLMs), there is an increasing interest in exploring their capabilities in causal reasoning and their potential use to hypothesize causal graphs. These tasks necessitate the LLMs to encode the causal graph effectively for subsequent downstream tasks. In this paper, we introduce \textbf{CausalGraph2LLM}, a comprehensive benchmark comprising over \textit{700k} queries across diverse causal graph settings to evaluate the causal reasoning capabilities of LLMs. We categorize the causal queries into two types: graph-level and node-level queries. We benchmark both open-sourced and closed models for our study. Our findings reveal that while LLMs show promise in this domain, they are highly sensitive to the encoding used. Even capable models like GPT-4 and Gemini-1.5 exhibit sensitivity to encoding, with deviations of about 60{\%}. We further demonstrate this sensitivity for downstream causal intervention tasks. Moreover, we observe that LLMs can often display biases when presented with contextual information about a causal graph, potentially stemming from their parametric memory.},
}
Markdown (Informal)
[CausalGraph2LLM: Evaluating LLMs for Causal Queries](https://aclanthology.org/2025.findings-naacl.110/) (Sheth et al., Findings 2025)
ACL
- Ivaxi Sheth, Bahare Fatemi, and Mario Fritz. 2025. CausalGraph2LLM: Evaluating LLMs for Causal Queries. In Findings of the Association for Computational Linguistics: NAACL 2025, pages 2076–2098, Albuquerque, New Mexico. Association for Computational Linguistics.