@inproceedings{kim-etal-2023-fineprompt,
    title = "{F}ine{P}rompt: Unveiling the Role of Finetuned Inductive Bias on Compositional Reasoning in {GPT}-4",
    author = "Kim, Jeonghwan and
      Hong, Giwon and
      Myaeng, Sung-Hyon and
      Whang, Joyce",
    editor = "Bouamor, Houda and
      Pino, Juan and
      Bali, Kalika",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
    month = dec,
    year = "2023",
    address = "Singapore",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-emnlp.245/",
    doi = "10.18653/v1/2023.findings-emnlp.245",
    pages = "3763--3775",
    abstract = "Compositional reasoning across texts has been a long-standing challenge in natural language processing. With large language models like GPT-4 taking over the field, prompting techniques such as chain-of-thought (CoT) were proposed to unlock compositional, multi-step reasoning capabilities of LLMs. Despite their success, the prompts demand significant human effort to discover and validate them. Our work draws attention to the idea of transferring task-specific inductive biases from finetuned models to prompts, as a way of improving GPT-4{'}s compositional reasoning capabilities. To leverage these inductive biases, we formulate prompt templates to ease the transfer of inductive biases. The experimental results on multi-hop question answering and numerical reasoning over text show that our proposed prompt scheme shows competitive zero-shot and few-shot performances compared to existing prompts on complicated reasoning tasks, highlighting the importance of adopting the validated biases of the previous paradigm."
}
Markdown (Informal)
[FinePrompt: Unveiling the Role of Finetuned Inductive Bias on Compositional Reasoning in GPT-4](https://aclanthology.org/2023.findings-emnlp.245/) (Kim et al., Findings 2023)
ACL