@inproceedings{zheng-etal-2025-long,
title = "When Long Helps Short: How Context Length in Supervised Fine-tuning Affects Behavior of Large Language Models",
author = "Zheng, Yingming and
Li, Hanqi and
Yu, Kai and
Chen, Lu",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.522/",
pages = "10304--10319",
ISBN = "979-8-89176-332-6",
    abstract = "Large language models (LLMs) have achieved impressive performance across natural language processing (NLP) tasks. As real-world applications increasingly demand longer context windows, continued pretraining and supervised fine-tuning (SFT) on long-context data have become a common approach. While the effects of data length in continued pretraining have been extensively studied, their implications for SFT remain unclear. In this work, we systematically investigate how SFT data length influences LLM behavior on short-context tasks. Counterintuitively, we find that long-context SFT improves short-context performance, contrary to the commonly observed degradation from long-context pretraining. To uncover the underlying mechanisms of this phenomenon, we first decouple and analyze two key components, Multi-Head Attention (MHA) and Feed-Forward Network (FFN), and show that both independently benefit from long-context SFT. We further study their interaction and reveal a knowledge preference bias: long-context SFT promotes contextual knowledge, while short-context SFT favors parametric knowledge, making exclusive reliance on long-context SFT suboptimal. Finally, we demonstrate that hybrid training mitigates this bias, offering explainable guidance for fine-tuning LLMs."
}