@article{li-etal-2025-keft,
title = "{KEFT}: Knowledge-Enhanced Fine-Tuning for Large Language Models in Domain-Specific Question Answering",
author = "Li, Haiyun and
Zhang, Jixin and
Shen, Hua and
Cheng, Ke and
Huang, Xiaofeng",
journal = "Transactions of the Association for Computational Linguistics",
volume = "13",
year = "2025",
address = "Cambridge, MA",
publisher = "MIT Press",
url = "https://preview.aclanthology.org/fix-opsupmap-display/2025.tacl-1.49/",
doi = "10.1162/tacl.a.31",
pages = "1056--1067",
abstract = "The rapid advancement of large language models (LLMs) has opened up promising opportunities for their downstream applications in question-answering (QA), such as ChatGPT, ChatGLM, etc. However, such LLMs do not perform very well in domain-specific QA tasks without fine-tuning. But directly fine-tuning LLMs on domain-specific corpus data may lead to catastrophic forgetting, causing the LLMs to lose their general language capability. To address this problem, we propose the Knowledge-Enhanced Fine-Tuning (KEFT) method, an unsupervised fine-tuning approach to enhance the knowledge capability of LLMs in domain-specific QA tasks while preserving their general language capability. KEFT leverages the inherent language comprehension of pre-trained LLMs to generate synthetic-QA datasets from domain-specific corpus data autonomously for fine-tuning, and adopts a Low-Rank Adaptation (LoRA) method to further alleviate over-fitting. Furthermore, to enhance the representation of domain-specific knowledge, we introduce a knowledge-enhanced fine-tuning loss function, which encourages the model to learn the knowledge-question connection, thereby generating natural and knowledgeable answers. Our evaluations across multiple domain-specific datasets demonstrate that KEFT surpasses state-of-the-art fine-tuning approaches, enhancing the performance of various LLMs in QA tasks in both English and Chinese languages."
}
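Below is a minimal sketch (not the authors' released code) of the general recipe the abstract describes: wrapping a pre-trained LLM with LoRA adapters and fine-tuning it on synthetic question-answer pairs generated from a domain corpus. The base model name, the prompt layout, and the answer-only loss masking are illustrative assumptions; the paper's actual knowledge-enhanced loss is not specified in the abstract.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoraConfig, get_peft_model

# Hypothetical base model; the paper evaluates several English and Chinese LLMs.
BASE = "meta-llama/Llama-2-7b-hf"
tok = AutoTokenizer.from_pretrained(BASE)
model = AutoModelForCausalLM.from_pretrained(BASE, torch_dtype=torch.bfloat16)

# Low-Rank Adaptation: train small adapter matrices while the base weights
# stay frozen, which helps limit over-fitting to the domain corpus.
lora_cfg = LoraConfig(r=8, lora_alpha=16, lora_dropout=0.05,
                      target_modules=["q_proj", "v_proj"],
                      task_type="CAUSAL_LM")
model = get_peft_model(model, lora_cfg)

def qa_step(question: str, answer: str) -> torch.Tensor:
    """Loss for one synthetic QA pair generated from the domain corpus.

    Stand-in for the paper's knowledge-enhanced loss (not given in the
    abstract): the question tokens are masked out so the gradient focuses
    on the answer, i.e. the knowledge-bearing, tokens.
    """
    text = question + "\n" + answer + tok.eos_token
    enc = tok(text, return_tensors="pt")
    labels = enc["input_ids"].clone()
    q_len = len(tok(question + "\n")["input_ids"])
    labels[:, :q_len] = -100          # -100 = ignored by the LM loss
    return model(**enc, labels=labels).loss

A full training loop would batch such pairs, call backward() on the returned loss, and update only the LoRA parameters with a standard optimizer.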