@inproceedings{sullivan-2025-exploring,
title = "Exploring Graph Representations of Logical Forms for Language Modeling",
author = "Sullivan, Michael",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.findings-acl.635/",
pages = "12285--12307",
ISBN = "979-8-89176-256-5",
abstract = "We make the case for language models over logical forms (LFLMs), arguing that such models are more data-efficient than their textual counterparts. To that end, we introduce the $\underline{G}\textit{raph-based }\underline{Fo}\textit{rmal-}\underline{L}\textit{ogical }\underline{D}\textit{istributional }\underline{S}\textit{emantics}$ (GFoLDS) prototype, a pretrained LM over graph representations of logical forms, as a proof-of-concept of LFLMs. Using GFoLDS, we present strong experimental evidence that LFLMs can leverage the built-in, basic linguistic knowledge inherent in such models to immediately begin learning more complex patterns. On downstream tasks, we show that GFoLDS vastly outperforms textual, transformer LMs (BERT) pretrained on the same data, indicating that LFLMs can learn with substantially less data than models over plain text. Furthermore, we show that the performance of this model is likely to scale with additional parameters and pretraining data, suggesting the viability of LFLMs in real-world applications."
}