@inproceedings{xiong-etal-2025-fine,
title = "Is Fine-Tuning an Effective Solution? Reassessing Knowledge Editing for Unstructured Data",
author = "Xiong, Hao and
Tan, Chuanyuan and
Chen, Wenliang",
editor = "Inui, Kentaro and
Sakti, Sakriani and
Wang, Haofen and
Wong, Derek F. and
Bhattacharyya, Pushpak and
Banerjee, Biplab and
Ekbal, Asif and
Chakraborty, Tanmoy and
Singh, Dhirendra Pratap",
booktitle = "Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "The Asian Federation of Natural Language Processing and The Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-long.74/",
pages = "1348--1361",
ISBN = "979-8-89176-298-5",
abstract = "Unstructured Knowledge Editing (UKE) is crucial for updating the relevant knowledge of large language models (LLMs). It focuses on unstructured inputs, such as long or free-form texts, which are common forms of real-world knowledge. Although previous studies have proposed effective methods and tested them, some issues exist: (1) Lack of Locality evaluation for UKE, and (2) Abnormal failure of fine-tuning (FT) based methods for UKE.To address these issues, we first construct two datasets, UnKEBench-Loc and AKEW-Loc (CF), by extending two existing UKE datasets with locality test data from the unstructured and structured views. This enables a systematic evaluation of the Locality of post-edited models. Furthermore, we identify four factors that may affect the performance of FT-based methods. Based on these factors, we conduct experiments to determine how the well-performing FT-based methods should be trained for the UKE task, providing a training recipe for future research. Our experimental results indicate that the FT-based method with the optimal setting (FT-UKE) is surprisingly strong, outperforming the existing state-of-the-art (SOTA).In batch editing scenarios, FT-UKE shows strong performance as well, with its advantage over SOTA methods increasing as the batch size grows, expanding the average metric lead from +6.78{\%} to +10.80{\%}. Our code and data will be released on Github."
}