@inproceedings{hong-etal-2025-dp,
title = "{DP}-{FROST}: Differentially Private Fine-tuning of Pre-trained Models with Freezing Model Parameters",
author = "Hong, Daeyoung and
Jung, Woohwan and
Shim, Kyuseok",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Eugenio, Barbara Di and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2025.coling-main.465/",
pages = "6966--6984",
abstract = "Training models with differential privacy has received a lot of attentions since differential privacy provides theoretical guarantee of privacy preservation. For a task in a specific domain, since a large-scale pre-trained model in the same domain contains general knowledge of the task, using such a model requires less effort in designing and training the model. However, differentially privately fine-tuning such models having a large number of trainable parameters results in large degradation of utility. Thus, we propose methods that effectively fine-tune the large-scale pre-trained models with freezing unimportant parameters for downstream tasks while satisfying differential privacy. To select the parameters to be fine-tuned, we propose several efficient methods based on the gradients of model parameters. We show the effectiveness of the proposed method by performing experiments with real datasets."
}
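
The abstract describes scoring parameters by their gradients, freezing the unimportant ones, and fine-tuning the rest under differential privacy. The sketch below is a minimal illustration of that general idea, not the authors' DP-FROST algorithm: it scores parameter tensors by accumulated gradient magnitude on a few warm-up batches, freezes the low-scoring tensors, and runs a DP-SGD step with per-example gradient clipping and Gaussian noise. All function names, the keep ratio, and the DP hyperparameters are illustrative placeholders.

```python
# Illustrative sketch only; not the paper's exact method.
import torch
import torch.nn as nn

def score_parameters(model, loss_fn, batches):
    """Accumulate absolute gradients as an importance score per parameter tensor."""
    scores = {name: torch.zeros_like(p) for name, p in model.named_parameters()}
    for x, y in batches:
        model.zero_grad()
        loss_fn(model(x), y).backward()
        for name, p in model.named_parameters():
            if p.grad is not None:
                scores[name] += p.grad.abs()
    return scores

def freeze_unimportant(model, scores, keep_ratio=0.1):
    """Keep only the top `keep_ratio` fraction of parameter tensors trainable (by mean score)."""
    ranked = sorted(scores, key=lambda n: scores[n].mean(), reverse=True)
    keep = set(ranked[: max(1, int(keep_ratio * len(ranked)))])
    for name, p in model.named_parameters():
        p.requires_grad = name in keep

def dp_sgd_step(model, loss_fn, batch, lr=1e-3, clip=1.0, noise_multiplier=1.0):
    """One DP-SGD step over the trainable parameters: clip each example's gradient,
    sum, add Gaussian noise calibrated to the clipping norm, then update."""
    xs, ys = batch
    params = [p for p in model.parameters() if p.requires_grad]
    grad_sum = [torch.zeros_like(p) for p in params]
    for x, y in zip(xs, ys):                      # per-example gradients
        model.zero_grad()
        loss_fn(model(x.unsqueeze(0)), y.unsqueeze(0)).backward()
        norm = torch.sqrt(sum(p.grad.pow(2).sum() for p in params))
        scale = min(1.0, clip / (norm.item() + 1e-12))
        for g, p in zip(grad_sum, params):
            g += p.grad * scale
    with torch.no_grad():
        for g, p in zip(grad_sum, params):
            g += torch.randn_like(g) * noise_multiplier * clip   # Gaussian mechanism
            p -= lr * g / len(xs)                                # average and update
```

The per-example clipping plus Gaussian noise is what makes each step differentially private; in practice a library such as Opacus would handle this accounting, and the parameter-selection step would follow the gradient-based criteria proposed in the paper.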