@inproceedings{wang-etal-2023-lets,
title = "Let`s Synthesize Step by Step: Iterative Dataset Synthesis with Large Language Models by Extrapolating Errors from Small Models",
author = "Wang, Ruida and
Zhou, Wangchunshu and
Sachan, Mrinmaya",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.findings-emnlp.791/",
doi = "10.18653/v1/2023.findings-emnlp.791",
pages = "11817--11831",
abstract = "*Data Synthesis* is a promising way to train a small model with very little labeled data. One approach for data synthesis is to leverage the rich knowledge from large language models to synthesize pseudo training examples for small models, making it possible to achieve both data and compute efficiency at the same time. However, a key challenge in data synthesis is that the synthesized dataset often suffers from a large distributional discrepancy from the *real task* data distribution. Thus, in this paper, we propose *Synthesis Step by Step* (**S3**), a data synthesis framework that shrinks this distribution gap by iteratively extrapolating the errors made by a small model trained on the synthesized dataset on a small real-world validation dataset using a large language model. Extensive experiments on multiple NLP tasks show that our approach improves the performance of a small model by reducing the gap between the synthetic dataset and the real data, resulting in significant improvement compared to several baselines: 9.48{\%} improvement compared to ZeroGen and 2.73{\%} compared to GoldGen, and at most 15.17{\%} improvement compared to the small model trained on human-annotated data."
}
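
A minimal sketch of the iterative loop the abstract describes (synthesize pseudo data with the LLM, train a small model, collect its errors on a small real validation set, let the LLM extrapolate new examples from those errors, repeat). The function arguments below are hypothetical placeholders, not the authors' implementation:

```python
from typing import Callable, List, Tuple

Example = Tuple[str, str]  # (input text, label)

def s3_synthesis(
    synthesize_seed: Callable[[], List[Example]],                  # LLM call: initial pseudo data
    extrapolate_errors: Callable[[List[Example]], List[Example]],  # LLM call: new data from errors
    train: Callable[[List[Example]], Callable[[str], str]],        # trains small model, returns predict fn
    validation_set: List[Example],                                 # small real-task validation set
    num_rounds: int = 3,
) -> Callable[[str], str]:
    # Round 0: synthesize an initial pseudo-labeled training set with the LLM.
    synthetic_data = synthesize_seed()
    for _ in range(num_rounds):
        predict = train(synthetic_data)
        # Validation examples the small model misclassifies expose the gap
        # between the synthetic distribution and the real task distribution.
        errors = [(x, y) for x, y in validation_set if predict(x) != y]
        if not errors:
            break
        # Extrapolate from those errors: synthesize new examples that resemble
        # the misclassified real data and add them to the training set.
        synthetic_data = synthetic_data + extrapolate_errors(errors)
    # Final small model trained on the accumulated synthetic dataset.
    return train(synthetic_data)
```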