@inproceedings{cui-wang-2024-ada,
title = "{A}da-Instruct: Adapting Instruction Generators for Complex Reasoning",
author = "Cui, Wanyun and
Wang, Qianle",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.findings-emnlp.409/",
doi = "10.18653/v1/2024.findings-emnlp.409",
pages = "6967--6984",
abstract = "Instructions augmentation is a crucial step for unleashing the full potential of large language models (LLMs) in downstream tasks. Existing Self-Instruct methods primarily simulate new instructions from a few initial instructions with in-context learning. However, our study identifies a critical flaw in this approach: even with GPT4o, it cannot generate complex instructions of length $\ge 100$, which is necessary in complex tasks such as code completion.To address this issue, our key insight is that fine-tuning open source LLMs with only \textit{ten examples} can produce complex instructions that maintain distributional consistency for complex reasoning tasks. We introduce Ada-Instruct, an adaptive instruction generator developed through fine-tuning. We empirically validated Ada-Instruct`s efficacy across different applications. The results highlight Ada-Instruct`s capacity to generate long, intricate, and distributionally consistent instructions."
}