@inproceedings{fehlauer-etal-2025-convergence,
title = "Convergence and Divergence of Language Models under Different Random Seeds",
author = "Fehlauer, Finlay and
Mahowald, Kyle and
Pimentel, Tiago",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1675/",
pages = "32970--32979",
ISBN = "979-8-89176-332-6",
    abstract = "In this paper, we investigate the convergence of language models (LMs) trained under different random seeds, measuring convergence as the expected per-token Kullback{--}Leibler (KL) divergence across seeds. By comparing LM convergence as a function of model size and training checkpoint, we identify a four-phase convergence pattern: (i) an initial \textbf{uniform phase}, (ii) a \textbf{sharp-convergence phase}, (iii) a \textbf{sharp-divergence phase}, and (iv) a \textbf{slow-reconvergence phase}. Further, we observe that larger models reconverge faster in later training stages, while smaller models never actually reconverge; these results suggest that a certain model size may be necessary to learn stable distributions. Restricting our analysis to specific token frequencies or part-of-speech (PoS) tags further reveals that convergence is uneven across linguistic categories: frequent tokens and function words converge faster and more reliably than their counterparts (infrequent tokens and content words). Overall, our findings highlight factors that influence the stability of the learned distributions in model training."
}
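
The convergence measure described in the abstract is the expected per-token KL divergence between models trained under different random seeds. As a rough illustration only (not the authors' code), here is a minimal NumPy sketch of that quantity for two arrays of next-token distributions; the function name, array shapes, and the clipping constant are all assumptions for this example.

```python
import numpy as np

def expected_per_token_kl(p_seed_a, p_seed_b, eps=1e-12):
    """Average per-token KL(p_a || p_b) between next-token distributions
    from two models trained under different random seeds.

    p_seed_a, p_seed_b: arrays of shape (num_tokens, vocab_size),
    each row a probability distribution over the vocabulary.
    """
    # Clip to avoid log(0); this is a sketch, so no renormalization.
    p_a = np.clip(p_seed_a, eps, 1.0)
    p_b = np.clip(p_seed_b, eps, 1.0)
    kl_per_token = np.sum(p_a * (np.log(p_a) - np.log(p_b)), axis=-1)
    return kl_per_token.mean()

# Toy usage with random logits over a vocabulary of 5 tokens.
rng = np.random.default_rng(0)
logits_a = rng.normal(size=(4, 5))
logits_b = rng.normal(size=(4, 5))
p_a = np.exp(logits_a) / np.exp(logits_a).sum(-1, keepdims=True)
p_b = np.exp(logits_b) / np.exp(logits_b).sum(-1, keepdims=True)
print(expected_per_token_kl(p_a, p_b))
```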