@inproceedings{li-etal-2025-training,
title = "Training Long-Context {LLM}s Efficiently via Chunk-wise Optimization",
author = "Li, Wenhao and
Zhang, Yuxin and
Luo, Gen and
Yu, Daohai and
Ji, Rongrong",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.findings-acl.138/",
pages = "2691--2700",
ISBN = "979-8-89176-256-5",
    abstract = "While long-context large language models (LLMs) exhibit remarkable document processing capabilities, their prohibitively high training costs often hinder customized applications. To mitigate this issue, we propose \textit{Sequential Chunk-wise Optimization (SeCO)}, a memory-efficient training paradigm that partitions lengthy inputs into manageable chunks. Each chunk independently constructs its computational graph and performs localized backpropagation, ensuring that only one chunk{'}s forward activations are stored in memory. Building on SeCO, we further introduce \textit{Sparse Chunk-wise Optimization (SpaCO)}, which reduces computational overhead by selectively propagating gradients to specific chunks and incorporates a carefully designed compensation factor to ensure unbiased gradient estimation. SpaCO decouples the computational cost of backpropagation from the context length, enabling training time to gradually converge to inference time as sequences become longer. Implemented as lightweight training wrappers, both SeCO and SpaCO offer substantial practical benefits. For example, when fine-tuning an 8B model with LoRA on a single RTX 3090 GPU, SeCO expands maximum sequence length from 1K to 16K tokens, while SpaCO demonstrates accelerated training speed{---}achieving up to 3{\texttimes} faster than SeCO under the same experimental setup. These innovations provide new insights into optimizing long-context models, making them more accessible for practical applications. We have open-sourced the code at https://anonymous.4open.science/r/seco-CCBD."
}
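
The abstract describes SeCO as splitting a long input into chunks, building each chunk's computational graph independently, and backpropagating per chunk so that only one chunk's activations are resident in memory. The following is a minimal conceptual sketch of that idea in plain PyTorch, not the authors' released implementation (see the repository linked above for that): the `TinyCausalLM` toy model, the `chunkwise_step` helper, the chunk length, and the detached KV cache carried between chunks are all illustrative assumptions, and detaching the cache means gradients do not flow into earlier chunks here, which simplifies the paper's actual gradient handling.

```python
# Minimal sketch of chunk-wise optimization: each chunk is forwarded with a
# KV cache from earlier chunks, its loss is backpropagated immediately (freeing
# that chunk's graph), and gradients are accumulated before one optimizer step.
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinyCausalLM(nn.Module):
    """One self-attention layer plus an LM head, with an explicit KV cache."""

    def __init__(self, vocab=1000, dim=64):
        super().__init__()
        self.embed = nn.Embedding(vocab, dim)
        self.qkv = nn.Linear(dim, 3 * dim)
        self.head = nn.Linear(dim, vocab)

    def forward(self, ids, cache=None):
        x = self.embed(ids)                          # (B, T, D)
        q, k, v = self.qkv(x).chunk(3, dim=-1)
        if cache is not None:                        # prepend cached keys/values
            k = torch.cat([cache[0], k], dim=1)
            v = torch.cat([cache[1], v], dim=1)
        mask = causal_mask(q.size(1), k.size(1), ids.device)
        attn = F.scaled_dot_product_attention(
            q.unsqueeze(1), k.unsqueeze(1), v.unsqueeze(1), attn_mask=mask)
        return self.head(attn.squeeze(1)), (k, v)


def causal_mask(q_len, k_len, device):
    # Each query may attend to all cached positions plus itself and earlier ones.
    offset = k_len - q_len
    i = torch.arange(q_len, device=device).unsqueeze(1)
    j = torch.arange(k_len, device=device).unsqueeze(0)
    return j <= i + offset


def chunkwise_step(model, optimizer, ids, chunk_len=512):
    """Train on one long sequence chunk by chunk, backpropagating per chunk."""
    optimizer.zero_grad()
    cache, total_loss, n_chunks = None, 0.0, 0
    for start in range(0, ids.size(1) - 1, chunk_len):
        chunk = ids[:, start:start + chunk_len]
        target = ids[:, start + 1:start + chunk_len + 1]
        logits, cache = model(chunk, cache)
        loss = F.cross_entropy(
            logits[:, :target.size(1)].reshape(-1, logits.size(-1)),
            target.reshape(-1))
        loss.backward()                              # frees this chunk's graph
        # Detach the cache so the next chunk starts a fresh graph; only the
        # current chunk's activations are ever alive at once in this toy.
        cache = tuple(t.detach() for t in cache)
        total_loss += loss.item()
        n_chunks += 1
    optimizer.step()                                 # one step over accumulated grads
    return total_loss / n_chunks


if __name__ == "__main__":
    torch.manual_seed(0)
    model = TinyCausalLM()
    opt = torch.optim.AdamW(model.parameters(), lr=1e-3)
    long_ids = torch.randint(0, 1000, (1, 4096))     # one "long" sequence
    print("mean chunk loss:", chunkwise_step(model, opt, long_ids))
```

SpaCO, as described in the abstract, would additionally skip backpropagation for most chunks and rescale the surviving gradients with a compensation factor to keep the estimate unbiased; that sampling logic is omitted from this sketch.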