@inproceedings{wei-etal-2025-paft,
title = "{PAFT}: Prompt-Agnostic Fine-Tuning",
author = "Wei, Chenxing and
Shu, Yao and
Ou, Mingwen and
He, Ying and
Yu, Fei",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-luhme/2025.emnlp-main.37/",
doi = "10.18653/v1/2025.emnlp-main.37",
pages = "694--717",
ISBN = "979-8-89176-332-6",
abstract = "Fine-tuning large language models (LLMs) often causes overfitting to specific prompt wording, where minor phrasing variations drastically reduce performance. To address this, we propose Prompt-Agnostic Fine-Tuning (PAFT), a method that enhances robustness through dynamic prompt variation during training. PAFT first generates diverse synthetic prompts, then continuously samples from this set to construct training instances, forcing models to learn fundamental task principles rather than surface-level patterns. Across systematic evaluations using both supervised fine-tuning (SFT) and reinforcement learning fine-tuning (RLFT), PAFT consistently demonstrates improved performance on benchmarks for question answering, mathematical reasoning, and tool use. It achieves 7{\%} higher generalization accuracy on unseen prompts than standard methods with similar training efficiency. Notably, models trained with PAFT attain 3.2{\texttimes} faster inference speeds due to reduced prompt sensitivity. Ablation studies further validate effectiveness of PAFT, while theoretical analysis reveals that PAFT can effectively enhance the cross-domain generalization ability of LLM."
}