@inproceedings{dabramo-etal-2025-investigating,
    title     = "Investigating Large Language Models for Text-to-{SPARQL} Generation",
    author    = "D{'}Abramo, Jacopo and
      Zugarini, Andrea and
      Torroni, Paolo",
    editor    = "Shi, Weijia and
      Yu, Wenhao and
      Asai, Akari and
      Jiang, Meng and
      Durrett, Greg and
      Hajishirzi, Hannaneh and
      Zettlemoyer, Luke",
    booktitle = "Proceedings of the 4th International Workshop on Knowledge-Augmented Methods for Natural Language Processing",
    month     = may,
    year      = "2025",
    address   = "Albuquerque, New Mexico, USA",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2025.knowledgenlp-1.5/",
    pages     = "66--80",
    isbn      = "979-8-89176-229-9",
    abstract  = "Large Language Models (LLMs) have demonstrated strong capabilities in code generation, such as translating natural language questions into SQL queries. However, state-of-the-art solutions often involve a costly fine-tuning step. In this study, we extensively evaluate In-Context Learning (ICL) solutions for text-to-SPARQL generation with different architectures and configurations, based on methods for retrieving relevant demonstrations for few-shot prompting and working with multiple generated hypotheses. In this way, we demonstrate that LLMs can formulate SPARQL queries achieving state-of-the-art results on several Knowledge Graph Question Answering (KGQA) benchmark datasets without fine-tuning."
}
Markdown (Informal)
[Investigating Large Language Models for Text-to-SPARQL Generation](https://aclanthology.org/2025.knowledgenlp-1.5/) (D'Abramo et al., KnowledgeNLP 2025)
ACL