@inproceedings{lee-etal-2024-crafting,
title = "Crafting In-context Examples according to {LM}s' Parametric Knowledge",
author = "Lee, Yoonsang and
Atreya, Pranav and
Ye, Xi and
Choi, Eunsol",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.findings-naacl.133/",
doi = "10.18653/v1/2024.findings-naacl.133",
pages = "2069--2085",
    abstract = "In-context learning can improve performance on knowledge-rich tasks such as question answering. In such scenarios, in-context examples trigger a language model (LM) to surface information stored in its parametric knowledge. We study how to better construct in-context example sets, based on whether the model is aware of the in-context examples. We identify `known' examples, where models can correctly answer from their parametric knowledge, and `unknown' ones. Our experiments show that prompting with `unknown' examples decreases performance, potentially because it encourages hallucination rather than searching the model{'}s parametric knowledge. Constructing an in-context example set that presents both known and unknown information performs the best across diverse settings. We perform analysis on three multi-answer question answering datasets, which allows us to further study answer set ordering strategies based on the LM{'}s knowledge of each answer. Together, our study sheds light on how to best construct in-context example sets for knowledge-rich tasks."
}