@inproceedings{nafee-etal-2025-dynamic,
title = "Dynamic Retriever for In-Context Knowledge Editing via Policy Optimization",
author = "Nafee, Mahmud Wasif and
Jiang, Maiqi and
Chen, Haipeng and
Zhang, Yanfu",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-luhme/2025.emnlp-main.848/",
doi = "10.18653/v1/2025.emnlp-main.848",
pages = "16755--16768",
ISBN = "979-8-89176-332-6",
    abstract = "Large language models (LLMs) excel at factual recall yet still propagate stale or incorrect knowledge. In{-}context knowledge editing offers a gradient-free remedy suitable for black-box APIs, but current editors rely on static demonstration sets chosen by surface-level similarity, leading to two persistent obstacles: (i) a quantity{--}quality trade-off, and (ii) lack of adaptivity to task difficulty. We address these issues by dynamically selecting supporting demonstrations according to their utility for the edit. We propose Dynamic Retriever for In-Context Knowledge Editing (DR-IKE), a lightweight framework that (1) trains a BERT retriever with REINFORCE to rank demonstrations by editing reward, and (2) employs a learnable threshold {\ensuremath{\sigma}} to prune low-value examples, shortening the prompt when the edit is easy and expanding it when the task is hard. DR-IKE performs editing without modifying model weights, relying solely on forward passes for compatibility with black-box LLMs. On the CounterFact benchmark, it improves edit success by up to 17.1{\%}, reduces latency by 41.6{\%}, and preserves accuracy on unrelated queries{---}demonstrating scalable and adaptive knowledge editing."
}
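The abstract describes training a retriever with REINFORCE to rank demonstrations by editing reward and pruning them with a learnable threshold sigma. Below is a minimal illustrative sketch of that general idea, not the authors' implementation: a small linear scorer stands in for the BERT retriever, and `edit_reward` is a hypothetical placeholder for the edit-success signal.

```python
# Hypothetical sketch: REINFORCE-trained demonstration scorer with a
# learnable pruning threshold sigma (stand-in for the DR-IKE retriever).
import torch
import torch.nn as nn

class DemoScorer(nn.Module):
    """Scores candidate demonstrations; a BERT encoder would replace the linear layer."""
    def __init__(self, dim=32):
        super().__init__()
        self.linear = nn.Linear(dim, 1)
        self.sigma = nn.Parameter(torch.tensor(0.0))  # learnable pruning threshold

    def forward(self, demo_feats):
        # demo_feats: (num_demos, dim) feature vectors for candidate demonstrations
        scores = self.linear(demo_feats).squeeze(-1)     # relevance scores
        keep_probs = torch.sigmoid(scores - self.sigma)  # demos below sigma tend to be pruned
        return keep_probs

def edit_reward(kept_mask):
    # Placeholder reward: favors keeping at least one demo while penalizing
    # long prompts; a real reward would measure edit success on the target fact.
    n_kept = kept_mask.float().sum()
    return n_kept.clamp(max=1.0) - 0.05 * n_kept

scorer = DemoScorer()
opt = torch.optim.Adam(scorer.parameters(), lr=1e-2)

for step in range(200):
    demo_feats = torch.randn(8, 32)                  # stand-in demo embeddings
    keep_probs = scorer(demo_feats)
    dist = torch.distributions.Bernoulli(keep_probs)
    kept = dist.sample()                             # sample which demos to keep
    reward = edit_reward(kept)
    # REINFORCE: maximize expected reward via -reward * sum of log-probs
    loss = -(reward * dist.log_prob(kept).sum())
    opt.zero_grad()
    loss.backward()
    opt.step()
```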