@inproceedings{tan-etal-2024-personalized,
title = "Personalized Pieces: Efficient Personalized Large Language Models through Collaborative Efforts",
author = "Tan, Zhaoxuan and
Liu, Zheyuan and
Jiang, Meng",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.emnlp-main.371/",
doi = "10.18653/v1/2024.emnlp-main.371",
pages = "6459--6475",
abstract = "Personalized large language models (LLMs) aim to tailor interactions, content, and recommendations to individual user preferences. While parameter-efficient fine-tuning (PEFT) methods excel in performance and generalization, they are costly and limit communal benefits when used individually. To this end, we introduce Personalized Pieces (Per-Pcs), a framework that allows users to safely share and assemble personalized PEFT efficiently with collaborative efforts. Per-Pcs involves selecting sharers, breaking their PEFT into pieces, and training gates for each piece. These pieces are added to a pool, from which target users can select and assemble personalized PEFT using their history data. This approach preserves privacy and enables fine-grained user modeling without excessive storage and computation demands. Experimental results show Per-Pcs outperforms non-personalized and PEFT retrieval baselines, offering performance comparable to OPPU with significantly lower resource use across six tasks. Further analysis highlights Per-Pcs{'}s robustness concerning sharer count and selection strategy, pieces sharing ratio, and scalability in computation time and storage space. Per-Pcs{'}s modularity promotes safe sharing, making LLM personalization more efficient, effective, and widely accessible through collaborative efforts."
}
Markdown (Informal)
[Personalized Pieces: Efficient Personalized Large Language Models through Collaborative Efforts](https://preview.aclanthology.org/fix-sig-urls/2024.emnlp-main.371/) (Tan et al., EMNLP 2024)
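Illustrative sketch (not the authors' code): the abstract describes sharers' PEFT modules being broken into pieces, each paired with a trained gate, pooled, and then assembled by a target user via their history data. The snippet below sketches that gated piece-assembly idea under assumed details; the class `PeftPiece`, the function `assemble_pieces`, the LoRA-style piece parameterization, and the use of a single history embedding as the gate input are all hypothetical choices, not taken from the paper.

```python
# Hypothetical sketch of gated PEFT-piece assembly, loosely following the
# Per-Pcs abstract: pieces from a shared pool are scored by their gates
# against a target user's history embedding, and the top-scoring pieces
# are merged into one personalized low-rank weight update.
import torch


class PeftPiece(torch.nn.Module):
    """One shared LoRA-style piece plus a gate trained by its sharer (assumed design)."""

    def __init__(self, dim: int, rank: int):
        super().__init__()
        self.lora_a = torch.nn.Parameter(torch.randn(rank, dim) * 0.01)  # down-projection
        self.lora_b = torch.nn.Parameter(torch.zeros(dim, rank))         # up-projection
        self.gate = torch.nn.Linear(dim, 1)                              # relevance gate

    def score(self, user_emb: torch.Tensor) -> torch.Tensor:
        # Gate output: relevance of this piece to the target user's history embedding.
        return self.gate(user_emb).squeeze(-1)


def assemble_pieces(pool: list[PeftPiece], user_emb: torch.Tensor, top_k: int = 4) -> torch.Tensor:
    """Select the top-k pieces from the pool and merge them into one LoRA delta."""
    scores = torch.stack([piece.score(user_emb) for piece in pool])
    weights = torch.softmax(scores, dim=0)
    top = torch.topk(weights, k=min(top_k, len(pool)))
    # Weighted sum of the selected pieces yields the personalized weight update.
    delta = sum(weights[i] * (pool[i].lora_b @ pool[i].lora_a) for i in top.indices.tolist())
    return delta


if __name__ == "__main__":
    dim, rank = 64, 8
    pool = [PeftPiece(dim, rank) for _ in range(16)]  # pieces contributed by sharers
    user_emb = torch.randn(dim)                       # stand-in for a history embedding
    personalized_delta = assemble_pieces(pool, user_emb)
    print(personalized_delta.shape)                   # torch.Size([64, 64])
```

In this reading, no target-user fine-tuning is needed: personalization comes from selecting and weighting already-shared pieces, which is consistent with the abstract's claim of lower resource use than per-user PEFT (OPPU) while keeping raw user data private.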