@inproceedings{koirala-niraula-2021-npvec1,
title = "{NPV}ec1: Word Embeddings for {N}epali - Construction and Evaluation",
author = "Koirala, Pravesh and
Niraula, Nobal B.",
editor = "Rogers, Anna and
Calixto, Iacer and
Vuli{\'c}, Ivan and
Saphra, Naomi and
Kassner, Nora and
Camburu, Oana-Maria and
Bansal, Trapit and
Shwartz, Vered",
booktitle = "Proceedings of the 6th Workshop on Representation Learning for NLP (RepL4NLP-2021)",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2021.repl4nlp-1.18/",
doi = "10.18653/v1/2021.repl4nlp-1.18",
pages = "174--184",
abstract = "Word Embedding maps words to vectors of real numbers. It is derived from a large corpus and is known to capture semantic knowledge from the corpus. Word Embedding is a critical component of many state-of-the-art Deep Learning techniques. However, generating good Word Embeddings is a special challenge for low-resource languages such as Nepali due to the unavailability of large text corpus. In this paper, we present NPVec1 which consists of 25 state-of-art Word Embeddings for Nepali that we have derived from a large corpus using Glove, Word2Vec, FastText, and BERT. We further provide intrinsic and extrinsic evaluations of these Embeddings using well established metrics and methods. These models are trained using 279 million word tokens and are the largest Embeddings ever trained for Nepali language. Furthermore, we have made these Embeddings publicly available to accelerate the development of Natural Language Processing (NLP) applications in Nepali."
}