@inproceedings{zhang-etal-2025-prime,
title = "{PRIME}: Large Language Model Personalization with Cognitive Dual-Memory and Personalized Thought Process",
author = "Zhang, Xinliang Frederick and
Beauchamp, Nicholas and
Wang, Lu",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1711/",
pages = "33695--33724",
ISBN = "979-8-89176-332-6",
abstract = "Large language model (LLM) personalization aims to align model outputs with individuals' unique preferences and opinions. While recent efforts have implemented various personalization methods, a unified theoretical framework that can systematically understand the drivers of effective personalization is still lacking. In this work, we integrate the well-established cognitive dual-memory model into LLM personalization, by mirroring episodic memory to historical user engagements and semantic memory to long-term, evolving user beliefs. Specifically, we systematically investigate memory instantiations and introduce a unified framework, PRIME, using episodic and semanticmemory mechanisms. We further augment PRIME with a novel personalized thinking capability inspired by the slow thinking strategy. Moreover, recognizing the absence of suitable benchmarks, we introduce a dataset using Change My View (CMV) from Reddit, specifically designed to evaluate long-context personalization. Extensive experiments validate PRIME{'}s effectiveness across both long- and short-context scenarios. Further analysis confirms that PRIME effectively captures dynamic personalization beyond mere popularity biases."
}