@inproceedings{li-qiu-2023-finding,
    title     = "Finding Support Examples for In-Context Learning",
    author    = "Li, Xiaonan and
                 Qiu, Xipeng",
    editor    = "Bouamor, Houda and
                 Pino, Juan and
                 Bali, Kalika",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
    month     = dec,
    year      = "2023",
    address   = "Singapore",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2023.findings-emnlp.411/",
    doi       = "10.18653/v1/2023.findings-emnlp.411",
    pages     = "6219--6235",
    abstract  = "In-context learning is a new learning paradigm where a language model observes a few examples and directly outputs the test input's prediction. Previous works have shown that it is sensitive to the provided examples and randomly sampled examples probably cause inferior performance. In this paper, we propose finding {\textquotedblleft}support examples{\textquotedblright} for in-context learning: Given a training dataset, it aims to select one permutation of a few examples, which can well characterize the task for in-context learning and thus lead to superior performance. Although for traditional gradient-based training, there are extensive methods to find a coreset from the entire dataset, they struggle to find important in-context examples, because in-context learning occurs in the language model's forward process without gradients or parameter updates and thus has a significant gap with traditional training. Additionally, the strong dependence among in-context examples makes it an NP-hard combinatorial optimization problem and enumerating all permutations is infeasible. Hence we propose \textbf{LENS}, a fi\textbf{L}ter-th\textbf{EN}-\textbf{S}earch method to tackle this challenge in two stages: first we filter the dataset to obtain individually informative in-context examples. Specifically, we propose a novel metric, InfoScore, to evaluate the example's in-context informativeness based on the language model's feedback, and further propose a progressive filtering process to filter out uninformative examples. Then we propose diversity-guided example search which iteratively refines and evaluates the selected example permutations, to find examples that fully depict the task. The experimental results show that LENS significantly outperforms a wide range of baselines and further analyses show that each component contribute critically to the improvements and shed light on the principles of supporting examples and in-context learning."
}
@comment{
  Markdown (informal) citation:
  [Finding Support Examples for In-Context Learning](https://aclanthology.org/2023.findings-emnlp.411/) (Li & Qiu, Findings 2023)
  ACL Anthology
}