@inproceedings{medvedeva-etal-2017-sparse,
    title = "When Sparse Traditional Models Outperform Dense Neural Networks: the Curious Case of Discriminating between Similar Languages",
    author = "Medvedeva, Maria and
      Kroon, Martin and
      Plank, Barbara",
    editor = {Nakov, Preslav and
      Zampieri, Marcos and
      Ljube{\v{s}}i{\'c}, Nikola and
      Tiedemann, J{\"o}rg and
      Malmasi, Shervin and
      Ali, Ahmed},
    booktitle = "Proceedings of the Fourth Workshop on {NLP} for Similar Languages, Varieties and Dialects ({V}ar{D}ial)",
    month = apr,
    year = "2017",
    address = "Valencia, Spain",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W17-1219/",
    doi = "10.18653/v1/W17-1219",
    pages = "156--163",
    abstract = "We present the results of our participation in the VarDial 4 shared task on discriminating closely related languages. Our submission includes simple traditional models using linear support vector machines (SVMs) and a neural network (NN). The main idea was to leverage language group information. We did so with a two-layer approach in the traditional model and a multi-task objective in the neural network case. Our results confirm earlier findings: simple traditional models outperform neural networks consistently for this task, at least given the amount of systems we could examine in the available time. Our two-layer linear SVM ranked 2nd in the shared task."
}
Markdown (Informal)
[When Sparse Traditional Models Outperform Dense Neural Networks: the Curious Case of Discriminating between Similar Languages](https://aclanthology.org/W17-1219/) (Medvedeva et al., VarDial 2017)
ACL
Maria Medvedeva, Martin Kroon, and Barbara Plank. 2017. When Sparse Traditional Models Outperform Dense Neural Networks: the Curious Case of Discriminating between Similar Languages. In Proceedings of the Fourth Workshop on NLP for Similar Languages, Varieties and Dialects (VarDial), pages 156–163, Valencia, Spain. Association for Computational Linguistics.