@inproceedings{cecchi-babkin-2024-reportgpt,
    title     = "{ReportGPT}: Human-in-the-loop Verifiable Table-to-Text Generation",
    author    = "Cecchi, Lucas and
      Babkin, Petr",
    editor    = "Dernoncourt, Franck and
      Preo{\c{t}}iuc-Pietro, Daniel and
      Shimorina, Anastasia",
    booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track",
    month     = nov,
    year      = "2024",
    address   = "Miami, Florida, USA",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2024.emnlp-industry.39/",
    doi       = "10.18653/v1/2024.emnlp-industry.39",
    pages     = "529--537",
    abstract  = "Recent developments in the quality and accessibility of large language models have precipitated a surge in user-facing tools for content generation. Motivated by a necessity for human quality control of these systems, we introduce ReportGPT: a pipeline framework for verifiable human-in-the-loop table-to-text generation. ReportGPT is based on a domain specific language, which acts as a proof mechanism for generating verifiable commentary. This allows users to quickly check the relevancy and factuality of model outputs. User selections then become few-shot examples for improving the performance of the pipeline. We configure 3 approaches to our pipeline, and find that usage of language models in ReportGPT{'}s components trade off precision for more insightful downstream commentary. Furthermore, ReportGPT learns from human feedback in real-time, needing only a few samples to improve performance.",
    internal-note = "url normalised from preview.aclanthology.org staging host to canonical aclanthology.org; DOI unchanged"
}
Markdown (Informal)
[ReportGPT: Human-in-the-loop Verifiable Table-to-Text Generation](https://aclanthology.org/2024.emnlp-industry.39/) (Cecchi & Babkin, EMNLP 2024)
ACL