@inproceedings{sun-dredze-2025-amuro,
    title     = {{Amuro} {\&} {Char}: Analyzing the Relationship between Pre-Training and Fine-Tuning of Large Language Models},
    author    = {Sun, Kaiser and
                 Dredze, Mark},
    editor    = {Adlakha, Vaibhav and
                 Chronopoulou, Alexandra and
                 Li, Xiang Lorraine and
                 Majumder, Bodhisattwa Prasad and
                 Shi, Freda and
                 Vernikos, Giorgos},
    booktitle = {Proceedings of the 10th Workshop on Representation Learning for {NLP} ({RepL4NLP}-2025)},
    month     = may,
    year      = {2025},
    address   = {Albuquerque, NM},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.repl4nlp-1.11/},
    pages     = {131--151},
    isbn      = {979-8-89176-245-9},
    abstract  = {Large language model development relies on the pre-train-then-align paradigm, in which the model is typically pre-trained on a large text corpus and undergoes a tuning stage to align the model with human preference or downstream tasks. We investigate the relationship between pre-training and supervised fine-tuning by considering multiple tasks as well as different pre-trained model checkpoints. Our results on 18 datasets and two models suggest that i) although the model benefits significantly through supervised fine-tuning, it may forget previously known domain knowledge and tasks that are not seen during fine-tuning; ii) the model exhibits high sensitivity to evaluation prompts after supervised fine-tuning, but this sensitivity can be alleviated through further pre-training; iii) continual pre-training improves the model in a latent way that manifests after fine-tuning; iv) The model can already solve some tasks after pre-training while fine-tuning most benefits datasets where the model does not show capability during pre-training.},
}
Markdown (Informal)
[Amuro & Char: Analyzing the Relationship between Pre-Training and Fine-Tuning of Large Language Models](https://aclanthology.org/2025.repl4nlp-1.11/) (Sun & Dredze, RepL4NLP 2025)
ACL