@inproceedings{chen-zeng-2025-prototype,
title = "Prototype Conditioned Generative Replay for Continual Learning in {NLP}",
author = "Chen, Xi and
Zeng, Min",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.naacl-long.636/",
pages = "12754--12770",
ISBN = "979-8-89176-189-6",
abstract = "Generative replay has proven effective in addressing the catastrophic forgetting issue of continual learning (CL) in natural language processing (NLP). However, relying on a single task-specific token or prompt often falls short in generating pseudo-samples that accurately reflect the true data distribution. This leads to issues of semantic inconsistency and scale inconsistency.To tackle these challenges, we propose a Prototype Conditioned Generative Replay (PCGR) method, which enhances generative reply by incorporating task-level statistics through a Prototype Conditioned Variational Autoencoder (PCVAE).Specifically, task-level embedding statistics are stored as prototypes for each old task. When a new task is introduced, PCVAE draws samples from task-specific prototype-based distributions to generate pseudo-samples.By incorporating the prototype, the generated pseudo-samples are both more representative and sufficiently diverse to reflect the real data distribution.Furthermore, as previously stored prototypes may become outdated due to evolving model parameters, we propose a Prototype Shift Estimation (PSE) to adjust for these changes.Experiments on NLP tasks across two different scenarios show that PCGR outperforms previous state-of-the-art (SOTA) methods."
}
Markdown (Informal)
[Prototype Conditioned Generative Replay for Continual Learning in NLP](https://preview.aclanthology.org/landing_page/2025.naacl-long.636/) (Chen & Zeng, NAACL 2025)
ACL
Xi Chen and Min Zeng. 2025. Prototype Conditioned Generative Replay for Continual Learning in NLP. In Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 12754–12770, Albuquerque, New Mexico. Association for Computational Linguistics.
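
As a rough illustration of the prototype-conditioned sampling idea described in the abstract (and only that idea, not the authors' PCVAE implementation or their Prototype Shift Estimation), the sketch below stores per-task embedding statistics as a prototype and then draws replay latents from a prototype-based diagonal Gaussian. The names `PrototypeStore`, `update`, and `sample_latents`, as well as the diagonal-Gaussian assumption, are illustrative choices, not details taken from the paper.

```python
# Illustrative sketch, not the authors' code: prototype-conditioned sampling
# in the spirit of PCGR. All names and the diagonal Gaussian are assumptions.
import torch


class PrototypeStore:
    """Keeps task-level embedding statistics (mean and std) as prototypes."""

    def __init__(self):
        self.prototypes = {}  # task_id -> (mean, std), each of shape [hidden_dim]

    def update(self, task_id, embeddings):
        # embeddings: [num_examples, hidden_dim] encoder outputs for one old task.
        self.prototypes[task_id] = (embeddings.mean(dim=0), embeddings.std(dim=0))

    def sample_latents(self, task_id, num_samples):
        # Draw latents from the task-specific, prototype-based Gaussian so that
        # generated pseudo-samples track the old task's embedding distribution.
        mean, std = self.prototypes[task_id]
        eps = torch.randn(num_samples, mean.shape[0])
        return mean + eps * std


# Toy usage: fake encoder embeddings for an "old task", then sample replay latents.
store = PrototypeStore()
old_task_embeddings = torch.randn(128, 64) + 2.0  # placeholder embeddings
store.update(task_id=0, embeddings=old_task_embeddings)
replay_latents = store.sample_latents(task_id=0, num_samples=16)

# In the paper's setup these latents would condition a VAE decoder (PCVAE) to
# generate pseudo-text that is replayed alongside the new task's real data.
print(replay_latents.shape)  # torch.Size([16, 64])
```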