@inproceedings{zhao-etal-2025-prompt,
title = "Prompt Tuning Can Simply Adapt Large Language Models to Text Encoders",
author = "Zhao, Kaiyan and
Wu, Qiyu and
Miao, Zhongtao and
Tsuruoka, Yoshimasa",
editor = "Adlakha, Vaibhav and
Chronopoulou, Alexandra and
Li, Xiang Lorraine and
Majumder, Bodhisattwa Prasad and
Shi, Freda and
Vernikos, Giorgos",
booktitle = "Proceedings of the 10th Workshop on Representation Learning for NLP (RepL4NLP-2025)",
month = may,
year = "2025",
address = "Albuquerque, NM",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/moar-dois/2025.repl4nlp-1.3/",
doi = "10.18653/v1/2025.repl4nlp-1.3",
pages = "38--50",
ISBN = "979-8-89176-245-9",
abstract = "Recently, many works have been attempting to adapt Large Language Models (LLMs) for sentence embedding, with most of them fine-tuning LLMs towards the contrastive objective and enabling bi-directional attention for better performance, using LoRA to address the large model scale. In this work, we suggest that this adaptation can also be simply and effectively achieved using causal attention and with even fewer trainable parameters through soft prompt tuning, as an alternative to fine-tuning with LoRA and other methods with extra post-training tasks. Our method only optimizes a few learnable tokens while keeping the rest of the model frozen. Through experiments on a diverse set of evaluation tasks, we find that simply tuning only a few tokens can achieve a competitive performance with that of fine-tuning with LoRA. The percentage of trainable parameters can be reduced to less than 0.001{\%}. Moreover, we also demonstrate that turning causal attention to bi-directional attention with or without extra post-training tasks does not provide additional benefit when soft prompt tuning is applied, suggesting that causal attention can be naturally used in decoder-only LLMs for sentence embedding adaptation."
}
[Prompt Tuning Can Simply Adapt Large Language Models to Text Encoders](https://preview.aclanthology.org/moar-dois/2025.repl4nlp-1.3/) (Zhao et al., RepL4NLP 2025)
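
As a rough illustration of the adaptation described in the abstract (soft prompt tuning of a frozen decoder-only LLM into a sentence encoder, trained with an in-batch contrastive objective under causal attention), here is a minimal sketch. It is not the authors' implementation: the backbone name, prompt length, last-token pooling, temperature, and learning rate are illustrative assumptions.

```python
# Minimal sketch of soft prompt tuning for sentence embeddings (not the paper's code).
# Assumptions: a Hugging Face decoder-only model, last-token pooling, and an
# in-batch InfoNCE-style contrastive loss; all hyperparameters are placeholders.
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer

MODEL_NAME = "gpt2"       # placeholder decoder-only LLM
NUM_PROMPT_TOKENS = 8     # only these token embeddings are trained

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
tokenizer.pad_token = tokenizer.eos_token
model = AutoModel.from_pretrained(MODEL_NAME)
for p in model.parameters():          # keep the entire backbone frozen
    p.requires_grad_(False)

embed = model.get_input_embeddings()
soft_prompt = torch.nn.Parameter(     # the only trainable parameters
    torch.randn(NUM_PROMPT_TOKENS, embed.embedding_dim) * 0.02
)

def encode(sentences):
    batch = tokenizer(sentences, padding=True, return_tensors="pt")
    tok_emb = embed(batch["input_ids"])                      # (B, T, H)
    prompt = soft_prompt.unsqueeze(0).expand(tok_emb.size(0), -1, -1)
    inputs_embeds = torch.cat([prompt, tok_emb], dim=1)      # prepend soft prompt
    mask = torch.cat(
        [torch.ones(tok_emb.size(0), NUM_PROMPT_TOKENS, dtype=torch.long),
         batch["attention_mask"]], dim=1)
    hidden = model(inputs_embeds=inputs_embeds,
                   attention_mask=mask).last_hidden_state
    # Last-token pooling under causal attention (an assumption, not prescribed here).
    last = mask.sum(dim=1) - 1
    return hidden[torch.arange(hidden.size(0)), last]

def contrastive_loss(anchors, positives, temperature=0.05):
    # In-batch InfoNCE: matching pairs lie on the diagonal of the similarity matrix.
    a = F.normalize(encode(anchors), dim=-1)
    b = F.normalize(encode(positives), dim=-1)
    logits = a @ b.T / temperature
    labels = torch.arange(a.size(0))
    return F.cross_entropy(logits, labels)

# Only the soft prompt is passed to the optimizer.
optimizer = torch.optim.AdamW([soft_prompt], lr=1e-3)
loss = contrastive_loss(["a sentence", "another sentence"],
                        ["a paraphrase of it", "its paraphrase"])
loss.backward()
optimizer.step()
```

The sketch reflects the parameter-efficiency claim in the abstract: with the backbone frozen, the trainable parameters are just `NUM_PROMPT_TOKENS × hidden_size`, which for a billion-parameter model is well below 0.001% of the total.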