@inproceedings{zhang-etal-2025-generative,
title = "A Generative Adaptive Replay Continual Learning Model for Temporal Knowledge Graph Reasoning",
author = "Zhang, Zhiyu and
Chen, Wei and
Lin, Youfang and
Wan, Huaiyu",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.537/",
pages = "10964--10977",
ISBN = "979-8-89176-251-0",
abstract = "Recent Continual Learning (CL)-based Temporal Knowledge Graph Reasoning (TKGR) methods focus on significantly reducing computational cost and mitigating catastrophic forgetting caused by fine-tuning models with new data. However, existing CL-based TKGR methods still face two key limitations: (1) They usually one-sidedly reorganize individual historical facts, while overlooking the historical context essential for accurately understanding the historical semantics of these facts; (2) They preserve historical knowledge by simply replaying historical facts, while ignoring the potential conflicts between historical and emerging facts. In this paper, we propose a $\textbf{D}$eep $\textbf{G}$enerative $\textbf{A}$daptive $\textbf{R}$eplay (DGAR) method, which can generate and adaptively replay historical entity distribution representations from the whole historical context. To address the first challenge, historical context prompts as sampling units are built to preserve the whole historical context information. To overcome the second challenge, a pre-trained diffusion model is adopted to generate the historical distribution. During the generation process, the common features between the historical and current distributions are enhanced under the guidance of the TKGR model. In addition, a layer-by-layer adaptive replay mechanism is designed to effectively integrate historical and current distributions. Experimental results demonstrate that DGAR significantly outperforms baselines in reasoning and mitigating forgetting."
}