@inproceedings{zhou-etal-2025-bsfa,
title = "{BSFA}: Leveraging the Subspace Dichotomy to Accelerate Neural Network Training",
author = "Zhou, WenJie and
Wang, Bohan and
Chen, Wei and
Cheng, Xueqi",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.952/",
pages = "18845--18860",
ISBN = "979-8-89176-332-6",
abstract = "Recent studies (CITATION) highlight a fundamental dichotomy in deep learning optimization: Although parameter updates along the top eigendirections of the loss Hessian (Dom-space) capture most of the update magnitude, they often contribute minimally to loss reduction. In contrast, updates in the orthogonal component (Bulk-space) have smaller magnitudes but drive most learning progress.In this work, we further advance the understanding of this phenomenon and introduce the \textbf{Bulk-Space-Filtration-Accelerator (BSFA)}, a novel plug-and-play framework. BSFA accelerates training by differentially scaling update components projected onto these distinct subspaces, simultaneously enhancing stability by moderating updates in the dominant subspace and boosting convergence speed by amplifying those in the bulk-space.To ensure BSFA is both practical and scalable for contemporary large models, we introduce two key innovations: an efficient estimator using Principal Component Analysis (PCA) on historical updates for fast subspace estimation, and a block-wise strategy that applies this estimation on a per-parameter-block basis. These designs make BSFA computationally tractable and highly effective.We demonstrate BSFA{'}s acceleration across various tasks, notably achieving approximately 2$\times$ speedup when pre-training LLaMA-72M on WikiText-103 and LLaMA-134M on OpenWebText compared to vanilla AdamW."
}