@inproceedings{bhatia-etal-2021-fine,
  title     = {Fine-tuning Distributional Semantic Models for Closely-Related Languages},
  author    = {Bhatia, Kushagra and
               Aggarwal, Divyanshu and
               Vaidya, Ashwini},
  editor    = {Zampieri, Marcos and
               Nakov, Preslav and
               Ljube{\v{s}}i{\'c}, Nikola and
               Tiedemann, J{\"o}rg and
               Scherrer, Yves and
               Jauhiainen, Tommi},
  booktitle = {Proceedings of the Eighth Workshop on NLP for Similar Languages, Varieties and Dialects},
  month     = apr,
  year      = {2021},
  address   = {Kyiv, Ukraine},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.vardial-1.7/},
  pages     = {60--66},
  abstract  = {In this paper we compare the performance of three models: SGNS (skip-gram negative sampling) and augmented versions of SVD (singular value decomposition) and PPMI (Positive Pointwise Mutual Information) on a word similarity task. We particularly focus on the role of hyperparameter tuning for Hindi based on recommendations made in previous work (on English). Our results show that there are language specific preferences for these hyperparameters. We extend the best settings for Hindi to a set of related languages: Punjabi, Gujarati and Marathi with favourable results. We also find that a suitably tuned SVD model outperforms SGNS for most of our languages and is also more robust in a low-resource setting.}
}
Markdown (Informal)
[Fine-tuning Distributional Semantic Models for Closely-Related Languages](https://aclanthology.org/2021.vardial-1.7/) (Bhatia et al., VarDial 2021)
ACL