Canonical ACL Anthology record; url normalized from the temporary ingestion
preview host to the stable aclanthology.org address.
@inproceedings{zheng-etal-2025-model,
    title     = "Model Extrapolation Expedites Alignment",
    author    = "Zheng, Chujie and
                 Wang, Ziqi and
                 Ji, Heng and
                 Huang, Minlie and
                 Peng, Nanyun",
    editor    = "Che, Wanxiang and
                 Nabende, Joyce and
                 Shutova, Ekaterina and
                 Pilehvar, Mohammad Taher",
    booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month     = jul,
    year      = "2025",
    address   = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2025.acl-long.51/",
    pages     = "1025--1041",
    isbn      = "979-8-89176-251-0",
    abstract  = "Given the high computational cost of preference alignment training of large language models (LLMs), exploring efficient methods to reduce the training overhead remains an important and compelling research problem. Motivated by the observation that alignment training typically involves only small parameter changes without injecting new knowledge into models, we propose a straightforward method called ExPO (model extrapolation) to expedite LLMs' alignment with human preferences. Given a partially-trained model and its initial SFT checkpoint, ExPO improves the implicit optimization objective of alignment training by simply amplifying the parameter change based on a first-order approximation, without any additional training overhead. Through controlled experiments, we demonstrate that ExPO boosts a DPO model trained with only 20{\%} steps to outperform the fully-trained one. Moreover, we show that ExPO notably improves existing open-source LLMs (ranging from 1.8B to 70B parameters) on the leading AlpacaEval 2.0 and MT-Bench benchmarks, which highlights ExPO{'}s broader utility in efficiently enhancing LLM alignment."
}
Markdown (Informal)
[Model Extrapolation Expedites Alignment](https://aclanthology.org/2025.acl-long.51/) (Zheng et al., ACL 2025)
ACL
- Chujie Zheng, Ziqi Wang, Heng Ji, Minlie Huang, and Nanyun Peng. 2025. Model Extrapolation Expedites Alignment. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1025–1041, Vienna, Austria. Association for Computational Linguistics.