@inproceedings{schoch-ji-2025-monte,
title = "{M}onte {C}arlo Sampling for Analyzing In-Context Examples",
author = "Schoch, Stephanie and
Ji, Yangfeng",
editor = "Drozd, Aleksandr and
Sedoc, Jo{\~a}o and
Tafreshi, Shabnam and
Akula, Arjun and
Shu, Raphael",
booktitle = "The Sixth Workshop on Insights from Negative Results in NLP",
month = may,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.insights-1.7/",
pages = "63--78",
ISBN = "979-8-89176-240-4",
abstract = "Prior works have shown that in-context learning is brittle to presentation factors such as the order, number, and choice of selected examples. However, ablation-based guidance on selecting the number of examples may ignore the interplay between different presentation factors. In this work we develop a Monte Carlo sampling-based method to study the impact of number of examples while explicitly accounting for effects from order and selected examples. We find that previous guidance on how many in-context examples to select does not always generalize across different sets of selected examples and orderings, and whether one-shot settings outperform zero-shot settings is highly dependent on the selected example. Additionally, inspired by data valuation, we apply our sampling method to in-context example selection to select examples that perform well across different orderings. We find a negative result, that while performance is robust to ordering and number of examples, there is an unexpected performance degradation compared to random sampling."
}
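The abstract's core idea can be pictured with a short sketch. This is a minimal illustration under stated assumptions, not the authors' implementation: the example pool, the `evaluate` callback, and the sample counts are all hypothetical stand-ins, and a real run would build a prompt from the sampled examples, query an LLM, and score its predictions.

```python
import random
from statistics import mean

def monte_carlo_icl(pool, evaluate, max_k=8, n_samples=50, seed=0):
    """Estimate performance as a function of the number of in-context
    examples k, marginalizing over example choice and ordering by
    repeatedly drawing random ordered subsets of the example pool."""
    rng = random.Random(seed)
    results = {}
    for k in range(max_k + 1):  # k = 0 covers the zero-shot setting
        # random.sample returns a random subset in a random order, so each
        # draw varies both which examples appear and how they are ordered
        scores = [evaluate(rng.sample(pool, k)) for _ in range(n_samples)]
        results[k] = mean(scores)
    return results

if __name__ == "__main__":
    pool = [f"example_{i}" for i in range(20)]
    # dummy scorer standing in for prompting an LLM and measuring accuracy
    dummy_eval = lambda examples: random.random()
    print(monte_carlo_icl(pool, dummy_eval, max_k=4, n_samples=10))
```

Averaging over many sampled orderings and subsets at each k is what lets the estimate separate the effect of the number of examples from the effects of ordering and selection, rather than ablating one factor while holding the others fixed.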