@inproceedings{zhang-etal-2025-personalize,
title = "Personalize Your {LLM}: Fake it then Align it",
author = "Zhang, Yijing and
Adila, Dyah and
Shin, Changho and
Sala, Frederic",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.findings-naacl.407/",
pages = "7287--7301",
ISBN = "979-8-89176-195-7",
abstract = "Personalizing large language models (LLMs) is essential for delivering tailored interactions that improve user experience. Many existing personalization methods require fine-tuning LLMs for each user, rendering them prohibitively expensive for widespread adoption. Although retrieval-based approaches offer a more compute-efficient alternative, they still depend on large, high-quality datasets that are not consistently available for all users. To address this challenge, we propose Chameleon, a scalable and efficient personalization approach that uses (1) self-generated personal preference data and (2) representation editing to enable quick and cost-effective personalization. Our experiments on various tasks, including those from the LaMP personalization benchmark, show that Chameleon efficiently adapts models to personal preferences, improving instruction-tuned models and outperforms two personalization baselines by an average of 40{\%} across two model architectures."
}
Markdown (Informal)
[Personalize Your LLM: Fake it then Align it](https://aclanthology.org/2025.findings-naacl.407/) (Zhang et al., Findings 2025)
ACL
Yijing Zhang, Dyah Adila, Changho Shin, and Frederic Sala. 2025. Personalize Your LLM: Fake it then Align it. In Findings of the Association for Computational Linguistics: NAACL 2025, pages 7287–7301, Albuquerque, New Mexico. Association for Computational Linguistics.