@inproceedings{kim-etal-2025-domix,
    title     = "{DoMIX}: An Efficient Framework for Exploiting Domain Knowledge in Fine-Tuning",
    author    = "Kim, Dohoon and
                 Kang, Donghun and
                 Moon, Taesup",
    editor    = "Che, Wanxiang and
                 Nabende, Joyce and
                 Shutova, Ekaterina and
                 Pilehvar, Mohammad Taher",
    booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month     = jul,
    year      = "2025",
    address   = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2025.acl-long.710/",
    pages     = "14581--14602",
    isbn      = "979-8-89176-251-0",
    abstract  = "Domain-Adaptive Pre-training (DAP) has recently gained attention for its effectiveness in fine-tuning pre-trained models. Building on this, continual DAP has been explored to develop pre-trained models capable of incrementally incorporating different domain datasets. However, existing continual DAP methods face several limitations: (1) high computational cost and GPU memory usage during training; (2) sensitivity to incremental data order; and (3) providing a single, generalized model for all end tasks, which contradicts the essence of DAP. In this paper, we propose DoMIX, a novel approach that addresses these challenges by leveraging LoRA modules, a representative parameter-efficient fine-tuning (PEFT) method. Our approach enables efficient and parallel domain-adaptive pre-training that is robust to domain order and effectively utilizes accumulated knowledge to provide tailored pre-trained models for specific tasks. We also demonstrate that our method can be extended beyond the DAP setting to standard LLM fine-tuning scenarios. Code is available at https://github.com/dohoonkim-ai/DoMIX."
}
@comment{
Markdown (Informal)
[DoMIX: An Efficient Framework for Exploiting Domain Knowledge in Fine-Tuning](https://aclanthology.org/2025.acl-long.710/) (Kim et al., ACL 2025)
ACL
}