@inproceedings{qiu-etal-2025-continual,
  title     = {Continual Learning Using Only Large Language Model Prompting},
  author    = {Qiu, Jiabao and
               Ke, Zixuan and
               Liu, Bing},
  editor    = {Rambow, Owen and
               Wanner, Leo and
               Apidianaki, Marianna and
               Al-Khalifa, Hend and
               Di Eugenio, Barbara and
               Schockaert, Steven},
  booktitle = {Proceedings of the 31st International Conference on Computational Linguistics},
  month     = jan,
  year      = {2025},
  address   = {Abu Dhabi, UAE},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.coling-main.402/},
  pages     = {6014--6023},
  abstract  = {We introduce CLOB, a novel continual learning (CL) paradigm wherein a large language model (LLM) is regarded as a black box. Learning is done incrementally via only verbal prompting. CLOB does not fine-tune any part of the LLM or add any trainable parameters to it. It is particularly suitable for LLMs that are accessible via APIs. We also propose a new CL technique, called CIS, based on incremental summarization that also overcomes the LLM's input length limit. Experiments show CIS outperforms baselines by a very large margin.}
}
Markdown (Informal)
[Continual Learning Using Only Large Language Model Prompting](https://aclanthology.org/2025.coling-main.402/) (Qiu et al., COLING 2025)
ACL