@inproceedings{warczynski-etal-2024-leveraging-large,
    title     = "Leveraging Large Language Models for Building Interpretable Rule-Based Data-to-Text Systems",
    author    = "Warczy{\'n}ski, J{\k{e}}drzej and
      Lango, Mateusz and
      Du{\v{s}}ek, Ond{\v{r}}ej",
    editor    = "Mahamood, Saad and
      Minh, Nguyen Le and
      Ippolito, Daphne",
    booktitle = "Proceedings of the 17th International Natural Language Generation Conference",
    month     = sep,
    year      = "2024",
    address   = "Tokyo, Japan",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2024.inlg-main.48/",
    pages     = "622--630",
    abstract  = "We introduce a simple approach that uses a large language model (LLM) to automatically implement a fully interpretable rule-based data-to-text system in pure Python. Experimental evaluation on the WebNLG dataset showed that such a constructed system produces text of better quality (according to the BLEU and BLEURT metrics) than the same LLM prompted to directly produce outputs, and produces fewer hallucinations than a BART language model fine-tuned on the same data. Furthermore, at runtime, the approach generates text in a fraction of the processing time required by neural approaches, using only a single CPU."
}
Markdown (Informal)
[Leveraging Large Language Models for Building Interpretable Rule-Based Data-to-Text Systems](https://aclanthology.org/2024.inlg-main.48/) (Warczyński et al., INLG 2024)
ACL