@inproceedings{jacobsen-etal-2022-mulve,
title = "{M}u{LVE}, A Multi-Language Vocabulary Evaluation Data Set",
author = {Jacobsen, Anik and
Mohtaj, Salar and
M{\"o}ller, Sebastian},
editor = "Calzolari, Nicoletta and
B{\'e}chet, Fr{\'e}d{\'e}ric and
Blache, Philippe and
Choukri, Khalid and
Cieri, Christopher and
Declerck, Thierry and
Goggi, Sara and
Isahara, Hitoshi and
Maegaard, Bente and
Mariani, Joseph and
Mazo, H{\'e}l{\`e}ne and
Odijk, Jan and
Piperidis, Stelios",
booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference",
month = jun,
year = "2022",
address = "Marseille, France",
publisher = "European Language Resources Association",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.lrec-1.70/",
pages = "673--679",
    abstract = "Vocabulary learning is vital to foreign language learning. Correct and adequate feedback is essential to successful and satisfying vocabulary training. However, many vocabulary and language evaluation systems operate on simple rules and do not account for real-life user learning data. This work introduces the Multi-Language Vocabulary Evaluation Data Set (MuLVE), a data set consisting of vocabulary cards and real-life user answers, labeled to indicate whether the user answer is correct or incorrect. The data source is user learning data from the Phase6 vocabulary trainer. The data set contains vocabulary questions in German and English, Spanish, and French as target languages and is available in four different variations regarding pre-processing and deduplication. We experiment with fine-tuning pre-trained BERT language models on the downstream task of vocabulary evaluation with the proposed MuLVE data set. The fine-tuned models achieve outstanding results of {\ensuremath{>}} 95.5 accuracy and F2-score. The data set is available on the European Language Grid."
}
Markdown (Informal)
[MuLVE, A Multi-Language Vocabulary Evaluation Data Set](https://preview.aclanthology.org/fix-sig-urls/2022.lrec-1.70/) (Jacobsen et al., LREC 2022)