@inproceedings{lee-etal-2025-curriculum,
title = "Curriculum Debiasing: Toward Robust Parameter-Efficient Fine-Tuning Against Dataset Biases",
author = "Lee, Mingyu and
Kim, Yeachan and
Mok, Wing-Lam and
Lee, SangKeun",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.469/",
pages = "9524--9540",
ISBN = "979-8-89176-251-0",
abstract = "Parameter-efficient fine-tuning (PEFT) addresses the memory footprint issue of full fine-tuning by modifying only a subset of model parameters. However, on datasets exhibiting spurious correlations, we observed that PEFT slows down the model{'}s convergence on unbiased examples, while the convergence on biased examples remains fast. This leads to the model{'}s overfitting on biased examples, causing significant performance degradation in out-of-distribution (OOD) scenarios. Traditional debiasing methods mitigate this issue by emphasizing unbiased examples during training but often come at the cost of in-distribution (ID) performance drops. To address this trade-off issue, we propose a curriculum debiasing framework that presents examples in a \textit{biased-to-unbiased} order. Our framework initially limits the model{'}s exposure to unbiased examples, which are harder to learn, allowing it to first establish a foundation on easier-to-converge biased examples. As training progresses, we gradually increase the proportion of unbiased examples in the training set, guiding the model away from reliance on spurious correlations. Compared to the original PEFT methods, our method accelerates convergence on unbiased examples by approximately twofold and improves ID and OOD performance by 1.2{\%} and 8.0{\%}, respectively."
}
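The abstract describes a biased-to-unbiased curriculum in which the proportion of unbiased (harder-to-learn) examples in the training pool grows as training progresses. The sketch below is purely illustrative and is not the authors' implementation: the function name `curriculum_pool`, the per-example `bias_scores` (e.g., from a bias-only model), and the `start_frac` schedule are all assumptions introduced here to make the scheduling idea concrete.

```python
# Illustrative sketch only (not from the paper): a biased-to-unbiased
# curriculum sampler. Assumes each example has a precomputed bias score,
# where a higher score means the example is more biased / easier to learn.
import numpy as np


def curriculum_pool(bias_scores, step, total_steps, start_frac=0.3):
    """Return indices of the examples visible at this training step.

    The pool starts with the most-biased `start_frac` of the data and
    grows linearly until all examples, including the hardest (unbiased)
    ones, are available for sampling.
    """
    order = np.argsort(-np.asarray(bias_scores))          # most biased first
    frac = start_frac + (1.0 - start_frac) * min(step / total_steps, 1.0)
    k = max(1, int(frac * len(order)))
    return order[:k]


# Hypothetical usage: draw each mini-batch from the current curriculum pool.
rng = np.random.default_rng(0)
bias_scores = rng.random(1000)                            # placeholder scores
for step in range(0, 101, 25):
    pool = curriculum_pool(bias_scores, step, total_steps=100)
    batch = rng.choice(pool, size=8, replace=False)
    print(f"step {step}: pool size {len(pool)}")
```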