@inproceedings{sheng-etal-2025-dynamic,
title = "Dynamic Chunking and Selection for Reading Comprehension of Ultra-Long Context in Large Language Models",
author = "Sheng, Boheng and
Yao, Jiacheng and
Zhang, Meicong and
He, Guoxiu",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.acl-long.1538/",
pages = "31857--31876",
ISBN = "979-8-89176-251-0",
abstract = "Large language models (LLMs) often struggle to accurately read and comprehend extremely long texts. Current methods for improvement typically rely on splitting long contexts into fixed-length chunks. However, fixed truncation risks separating semantically relevant content, leading to ambiguity and compromising accurate understanding. To overcome this limitation, we propose a straightforward approach for dynamically separating and selecting chunks of long context, facilitating a more streamlined input for LLMs. In particular, we compute semantic similarities between adjacent sentences, using lower similarities to adaptively divide long contexts into variable-length chunks. We further train a question-aware classifier to select sensitive chunks that are critical for answering specific questions. Experimental results on both single-hop and multi-hop question-answering benchmarks show that the proposed approach consistently outperforms strong baselines. Notably, it maintains robustness across a wide range of input lengths, handling sequences of up to 256k tokens. Our datasets and code are available at the following link: https://github.com/ECNU-Text-Computing/DCS"
}
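
The abstract describes splitting long contexts at points of low adjacent-sentence semantic similarity. As a rough illustration of that idea only (not the authors' released implementation), the sketch below embeds sentences with a generic sentence-transformer model and starts a new chunk wherever the adjacent-sentence cosine similarity drops below a threshold; the model choice and threshold are assumptions, and the paper's question-aware chunk classifier is omitted.

```python
# Minimal sketch of similarity-based dynamic chunking, assuming a
# sentence-transformers model; thresholds and model name are illustrative,
# not the configuration used in the paper.
import numpy as np
from sentence_transformers import SentenceTransformer

def dynamic_chunks(sentences, model_name="all-MiniLM-L6-v2", threshold=0.35):
    """Group consecutive sentences into variable-length chunks,
    splitting where adjacent-sentence similarity is low."""
    model = SentenceTransformer(model_name)
    # Normalized embeddings make the dot product equal cosine similarity.
    emb = model.encode(sentences, normalize_embeddings=True)
    sims = np.sum(emb[:-1] * emb[1:], axis=1)  # similarity of each sentence to the next
    chunks, current = [], [sentences[0]]
    for sent, sim in zip(sentences[1:], sims):
        if sim < threshold:  # low similarity -> treat as a semantic boundary
            chunks.append(" ".join(current))
            current = []
        current.append(sent)
    chunks.append(" ".join(current))
    return chunks

if __name__ == "__main__":
    sents = [
        "The river floods every spring.",
        "Farmers along the bank plant after the water recedes.",
        "Quantum computers use qubits instead of bits.",
        "Qubits can be in superpositions of states.",
    ]
    for i, chunk in enumerate(dynamic_chunks(sents)):
        print(i, chunk)
```

In the paper's pipeline, chunks produced this way would then be filtered by a trained question-aware classifier before being passed to the LLM; that selection step is not shown here.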