@comment{ACL 2025 long paper. URL normalized from the temporary
  "preview.aclanthology.org/ingestion-acl-25" staging host to the
  permanent canonical ACL Anthology address; field values braced and
  field names lowercased for consistency. Data otherwise unchanged.}
@inproceedings{zou-etal-2025-many,
  title     = {On Many-Shot In-Context Learning for Long-Context Evaluation},
  author    = {Zou, Kaijian and
               Khalifa, Muhammad and
               Wang, Lu},
  editor    = {Che, Wanxiang and
               Nabende, Joyce and
               Shutova, Ekaterina and
               Pilehvar, Mohammad Taher},
  booktitle = {Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.acl-long.1245/},
  pages     = {25605--25639},
  isbn      = {979-8-89176-251-0},
  abstract  = {Many-shot in-context learning (ICL) has emerged as a unique setup to both utilize and test the ability of large language models to handle long context. This paper delves into long-context language model (LCLM) evaluation through many-shot ICL. We first ask: what types of ICL tasks benefit from additional demonstrations, and how effective are they in evaluating LCLMs? We find that classification and summarization tasks show performance improvements with additional demonstrations, while translation and reasoning tasks do not exhibit clear trends. Next, we investigate the extent to which different tasks necessitate retrieval versus global context understanding. We develop metrics to categorize ICL tasks into two groups: (i) similar-sample learning (SSL): tasks where retrieval of the most similar examples is sufficient for good performance, and (ii) all-sample learning (ASL): tasks that necessitate a deeper comprehension of all examples in the prompt. Lastly, we introduce a new many-shot ICL benchmark built on existing ICL tasks, MANYICLBENCH, to characterize model{'}s ability on both fronts and benchmark 12 LCLMs using MANYICLBENCH. We find that while state-of-the-art models demonstrate good performance up to 64k tokens in SSL tasks, many models experience significant performance drops at only 16k tokens in ASL tasks.},
}
Markdown (Informal)
[On Many-Shot In-Context Learning for Long-Context Evaluation](https://aclanthology.org/2025.acl-long.1245/) (Zou et al., ACL 2025)
ACL
- Kaijian Zou, Muhammad Khalifa, and Lu Wang. 2025. On Many-Shot In-Context Learning for Long-Context Evaluation. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 25605–25639, Vienna, Austria. Association for Computational Linguistics.