@inproceedings{honovich-etal-2023-unnatural,
title = "Unnatural Instructions: Tuning Language Models with (Almost) No Human Labor",
author = "Honovich, Or and
Scialom, Thomas and
Levy, Omer and
Schick, Timo",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.acl-long.806/",
doi = "10.18653/v1/2023.acl-long.806",
pages = "14409--14428",
abstract = "Instruction tuning enables pretrained language models to perform new tasks from inference-time natural language descriptions. These approaches rely on vast amounts of human supervision in the form of crowdsourced datasets or user interactions. In this work, we introduce Unnatural Instructions: a large dataset of creative and diverse instructions, collected with virtually no human labor. We collect 64,000 examples by prompting a language model with three seed examples of instructions and eliciting a fourth. This set is then expanded by prompting the model to rephrase each instruction, creating a total of approximately 240,000 examples of instructions, inputs, and outputs. Experiments show that despite containing a fair amount of noise, training on Unnatural Instructions rivals the effectiveness of training on open-source manually-curated datasets, surpassing the performance of models such as T0++ and Tk-Instruct across various benchmarks. These results demonstrate the potential of model-generated data as a cost-effective alternative to crowdsourcing for dataset expansion and diversification."
}
Markdown (Informal)
[Unnatural Instructions: Tuning Language Models with (Almost) No Human Labor](https://aclanthology.org/2023.acl-long.806/) (Honovich et al., ACL 2023)
ACL
Or Honovich, Thomas Scialom, Omer Levy, and Timo Schick. 2023. Unnatural Instructions: Tuning Language Models with (Almost) No Human Labor. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 14409–14428, Toronto, Canada. Association for Computational Linguistics.
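
The abstract in the BibTeX record above describes the data-collection recipe: prompt a language model with three seed examples of instructions, elicit a fourth, and then expand the elicited set by asking the model to rephrase each instruction. Below is a minimal sketch of that loop, assuming a generic text-completion hook; the `generate` stub, the `SEED_POOL` strings, and the prompt wording are illustrative placeholders, not the paper's released prompts or code.

```python
import random

# Hypothetical completion hook -- not the paper's code. Replace the body with a
# call to whichever language model API you use; here it returns a canned
# continuation so the sketch runs end to end.
def generate(prompt: str) -> str:
    return " Identify the sentiment of the given review.\nInput: <review text>\n"

# Three seed demonstrations in an instruction/input format (illustrative
# stand-ins for the paper's seed examples).
SEED_POOL = [
    "Example 1\nInstruction: Translate the sentence into French.\nInput: I love reading.\n",
    "Example 2\nInstruction: Summarize the paragraph in one sentence.\nInput: <paragraph>\n",
    "Example 3\nInstruction: List three antonyms of the given word.\nInput: happy\n",
]

def elicit_instruction() -> str:
    """Show three seed examples and ask the model to continue with a fourth."""
    demos = random.sample(SEED_POOL, 3)
    prompt = "\n".join(demos) + "\nExample 4\nInstruction:"
    return "Instruction:" + generate(prompt)

def rephrase(instruction: str) -> str:
    """Expansion step: ask the model to paraphrase an elicited instruction."""
    prompt = (
        "Rephrase the following instruction without changing its meaning:\n"
        f"{instruction}\nRephrased:"
    )
    return generate(prompt)

if __name__ == "__main__":
    core = [elicit_instruction() for _ in range(5)]   # core set (paper scale: ~64k)
    expanded = core + [rephrase(x) for x in core]     # rephrasings grow the set (~240k total)
    for example in expanded:
        print(example)
```

Resampling the demonstrations varies the in-context examples between calls; the rephrasing pass is what expands the roughly 64,000 elicited examples toward the approximately 240,000 total reported in the abstract.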