@inproceedings{forooghi-etal-2024-whitening,
title = "Whitening Not Recommended for Classification Tasks in {LLM}s",
author = "Forooghi, Ali and
Sadeghi, Shaghayegh and
Lu, Jianguo",
editor = "Zhao, Chen and
Mosbach, Marius and
Atanasova, Pepa and
Goldfarb-Tarrent, Seraphina and
Hase, Peter and
Hosseini, Arian and
Elbayad, Maha and
Pezzelle, Sandro and
Mozes, Maximilian",
booktitle = "Proceedings of the 9th Workshop on Representation Learning for NLP (RepL4NLP-2024)",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Add-Cong-Liu-Florida-Atlantic-University-author-id/2024.repl4nlp-1.21/",
pages = "285--289",
abstract = "Sentence embedding is a cornerstone in NLP. Whitening has been claimed to be an effective method to improve embeddings obtained from Large Language Models (LLMs) for sentence embedding. However, we find that the effectiveness of whitening is model-dependent and task-dependent. In particular, whitening degenerates embeddings for classification tasks. The conclusion is supported by extensive experiments. A by-product of our research is embedding evaluation platform for LLMs called SentEval+"
}
Markdown (Informal)
[Whitening Not Recommended for Classification Tasks in LLMs](https://aclanthology.org/2024.repl4nlp-1.21/) (Forooghi et al., RepL4NLP 2024)
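
For context, the whitening referred to in the abstract is, in its commonly used form for sentence embeddings, a linear transform that centers the vectors and rescales them so their covariance becomes the identity. Below is a minimal NumPy sketch of that standard transform; the function name, the optional dimensionality-reduction parameter `k`, and the SVD-based formulation are illustrative assumptions, not taken from this paper or from SentEval+.

```python
import numpy as np

def whiten(embeddings, k=None):
    """Standard whitening of sentence embeddings.

    embeddings: (n, d) array of sentence vectors.
    k: optional number of leading components to keep (dimensionality reduction).
    Returns embeddings with (approximately) zero mean and identity covariance.
    """
    mu = embeddings.mean(axis=0, keepdims=True)      # per-dimension mean
    cov = np.cov(embeddings - mu, rowvar=False)      # (d, d) covariance matrix
    u, s, _ = np.linalg.svd(cov)                     # cov = u @ diag(s) @ u.T
    w = u @ np.diag(1.0 / (np.sqrt(s) + 1e-12))      # whitening matrix, eps for stability
    if k is not None:
        w = w[:, :k]                                 # keep only the top-k directions
    return (embeddings - mu) @ w
```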