@inproceedings{wang-etal-2022-iteratively,
title = "Iteratively Prompt Pre-trained Language Models for Chain of Thought",
author = "Wang, Boshi and
Deng, Xiang and
Sun, Huan",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2022.emnlp-main.174/",
doi = "10.18653/v1/2022.emnlp-main.174",
pages = "2714--2730",
abstract = "While Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex {\&} multi-step reasoning. Similar to how humans develop a {\textquotedblleft}chain of thought{\textquotedblright} for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step`s contexts. Experiments on three datasets involving multi-step reasoning show the effectiveness of the iterative scheme and the context-aware prompter design."
}
Markdown (Informal)
[Iteratively Prompt Pre-trained Language Models for Chain of Thought](https://preview.aclanthology.org/add-emnlp-2024-awards/2022.emnlp-main.174/) (Wang et al., EMNLP 2022)