@inproceedings{berger-etal-2025-context,
title = "In-Context Learning on a Budget: A Case Study in Token Classification",
author = "Berger, Uri and
Baumel, Tal and
Stanovsky, Gabriel",
editor = "Drozd, Aleksandr and
Sedoc, Jo{\~a}o and
Tafreshi, Shabnam and
Akula, Arjun and
Shu, Raphael",
booktitle = "The Sixth Workshop on Insights from Negative Results in NLP",
month = may,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.insights-1.2/",
pages = "7--14",
ISBN = "979-8-89176-240-4",
abstract = "Few-shot in-context learning (ICL) typically assumes access to large annotated training sets. However, in many real-world scenarios, such as domain adaptation, there is only a limited budget to annotate a small number of samples, with the goal of maximizing downstream performance. We study various methods for selecting samples to annotate within a predefined budget, focusing on token classification tasks, which are expensive to annotate and are relatively less studied in ICL setups. Across various tasks, models, and datasets, we observe that no method significantly outperforms the others, with most yielding similar results, including random sample selection for annotation. Moreover, we demonstrate that a relatively small annotated sample pool can achieve performance comparable to using the entire training set. We hope that future work adopts our realistic paradigm, which takes the annotation budget into account."
}