@inproceedings{taguchi-etal-2025-efficient,
title = "Efficient Context Selection for Long-Context {QA}: No Tuning, No Iteration, Just Adaptive{-}$k$",
author = "Taguchi, Chihiro and
Maekawa, Seiji and
Bhutani, Nikita",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/name-variant-enfa-fane/2025.emnlp-main.1017/",
doi = "10.18653/v1/2025.emnlp-main.1017",
pages = "20116--20141",
ISBN = "979-8-89176-332-6",
    abstract = "Retrieval-augmented generation (RAG) and long-context language models (LCLMs) both address context limitations of LLMs in open-domain QA. However, determining the optimal amount of external context to retrieve remains an open problem: fixed retrieval budgets risk wasting tokens or omitting key evidence. Existing adaptive methods like Self-RAG and Self-Route rely on iterative LLM prompting and perform well on factoid QA, but struggle with aggregation QA, where the optimal context size is unknown and variable. We present Adaptive{-}$k$ retrieval, a simple and effective single-pass method that selects a query-specific number of passages by applying a threshold to the similarity scores between the query and candidate passages. It requires no model fine-tuning, no extra LLM calls, and no changes to existing retriever{--}reader pipelines. On both factoid and aggregation QA benchmarks, Adaptive{-}$k$ matches or outperforms fixed{-}$k$ baselines while using up to 10x fewer tokens than full-context input, and still retrieves 70{\%} of relevant passages. It improves accuracy across five LCLMs and two embedding models, highlighting that dynamically adjusting context size leads to more efficient and accurate QA."
}