@inproceedings{li-etal-2025-paraicl,
    title     = {{ParaICL}: Towards Parallel In-Context Learning},
    author    = {Li, Xingxuan and
                 Nguyen, Xuan-Phi and
                 Joty, Shafiq and
                 Bing, Lidong},
    editor    = {Chiruzzo, Luis and
                 Ritter, Alan and
                 Wang, Lu},
    booktitle = {Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)},
    month     = apr,
    year      = {2025},
    address   = {Albuquerque, New Mexico},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.naacl-long.621/},
    pages     = {12501--12511},
    isbn      = {979-8-89176-189-6},
    abstract  = {Large language models (LLMs) have become the norm in natural language processing (NLP), excelling in few-shot in-context learning (ICL) with their remarkable abilities. Nonetheless, the success of ICL largely hinges on the choice of few-shot demonstration examples, making the selection process increasingly crucial. Existing methods have delved into optimizing the quantity and semantic similarity of these examples to improve ICL performances. However, our preliminary experiments indicate that the effectiveness of ICL is limited by the length of the input context. Moreover, varying combinations of few-shot demonstration examples can significantly boost accuracy across different test samples. To address this, we propose a novel method named parallel in-context learning (ParaICL) that effectively utilizes all demonstration examples without exceeding the manageable input context length. ParaICL employs parallel batching to distribute demonstration examples into different batches according to the semantic similarities of the questions in the demonstrations to the test question. It then computes normalized batch semantic scores for each batch. A weighted average semantic objective, constrained by adaptive plausibility, is applied to select the most appropriate tokens. Through extensive experiments, we validate the effectiveness of ParaICL and conduct ablation studies to underscore its design rationale. We further demonstrate that ParaICL can seamlessly integrate with existing methods.},
}
Markdown (Informal)
[ParaICL: Towards Parallel In-Context Learning](https://aclanthology.org/2025.naacl-long.621/) (Li et al., NAACL 2025)
ACL
- Xingxuan Li, Xuan-Phi Nguyen, Shafiq Joty, and Lidong Bing. 2025. ParaICL: Towards Parallel In-Context Learning. In Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 12501–12511, Albuquerque, New Mexico. Association for Computational Linguistics.