@inproceedings{kolodin-ianina-2025-gigaembeddings,
title = "{G}iga{E}mbeddings {---} Efficient {R}ussian Language Embedding Model",
author = "Kolodin, Egor and
Ianina, Anastasia",
editor = "Piskorski, Jakub and
P{\v{r}}ib{\'a}{\v{n}}, Pavel and
Nakov, Preslav and
Yangarber, Roman and
Marcinczuk, Michal",
booktitle = "Proceedings of the 10th Workshop on Slavic Natural Language Processing (Slavic NLP 2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/acl25-workshop-ingestion/2025.bsnlp-1.3/",
pages = "17--24",
ISBN = "978-1-959429-57-9",
abstract = "We introduce GigaEmbeddings, a novel framework for training high-performance Russian-focused text embeddings through hierarchical instruction tuning of the decoder-only LLM designed specifically for Russian language (GigaChat-3B). Our three-stage pipeline, comprising large-scale contrastive pre-training in web-scale corpora, fine-tuning with hard negatives, and multitask generalization across retrieval, classification, and clustering tasks, addresses key limitations of existing methods by unifying diverse objectives and leveraging synthetic data generation. Architectural innovations include bidirectional attention for contextual modeling, latent attention pooling for robust sequence aggregation, and strategic pruning of 25{\%} of transformer layers to enhance efficiency without compromising performance. Evaluated on the ruMTEB benchmark spanning 23 multilingual tasks, GigaEmbeddings achieves state-of-the-art results (69.1 avg. score), outperforming strong baselines with a larger number of parameters."
}
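The abstract names latent attention pooling as the sequence-aggregation mechanism. The sketch below is a minimal PyTorch illustration of one common formulation of that idea, in which a small set of trainable latent queries cross-attends over the encoder's bidirectional token states before mean aggregation and L2 normalization. It is an assumption-based sketch, not the paper's implementation: the class name `LatentAttentionPooling` and the hyperparameters `num_latents` and `num_heads` are hypothetical placeholders, not values from the paper.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class LatentAttentionPooling(nn.Module):
    """Illustrative latent-attention pooler (hypothetical, not the paper's code).

    Trainable latent query vectors cross-attend over the token hidden states
    of a bidirectional encoder; the latent outputs are averaged into a single
    unit-norm embedding suitable for cosine-similarity retrieval.
    """

    def __init__(self, hidden_dim: int, num_latents: int = 512, num_heads: int = 8):
        super().__init__()
        # Learned latent queries, shared across all inputs.
        self.latents = nn.Parameter(torch.randn(num_latents, hidden_dim) * 0.02)
        self.cross_attn = nn.MultiheadAttention(hidden_dim, num_heads, batch_first=True)
        self.mlp = nn.Sequential(
            nn.Linear(hidden_dim, 4 * hidden_dim),
            nn.GELU(),
            nn.Linear(4 * hidden_dim, hidden_dim),
        )

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
        # hidden_states: (batch, seq_len, hidden_dim)
        # attention_mask: (batch, seq_len), 1 for real tokens, 0 for padding.
        batch = hidden_states.size(0)
        queries = self.latents.unsqueeze(0).expand(batch, -1, -1)
        # Exclude padding positions from the attended keys/values.
        pooled, _ = self.cross_attn(
            queries, hidden_states, hidden_states,
            key_padding_mask=(attention_mask == 0),
        )
        pooled = pooled + self.mlp(pooled)      # residual MLP block
        embedding = pooled.mean(dim=1)          # aggregate the latent outputs
        return F.normalize(embedding, dim=-1)   # unit-norm embedding
```

In pipelines of this kind, the pooled embeddings are typically optimized with an InfoNCE-style contrastive objective over in-batch and mined hard negatives, which corresponds to the contrastive pre-training and hard-negative fine-tuning stages the abstract describes.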