@inproceedings{klemen-etal-2024-si,
title = "{SI}-{NLI}: A {S}lovene Natural Language Inference Dataset and Its Evaluation",
author = "Klemen, Matej and
{\v{Z}}agar, Ale{\v{s}} and
{\v{C}}ibej, Jaka and
Robnik-{\v{S}}ikonja, Marko",
editor = "Calzolari, Nicoletta and
Kan, Min-Yen and
Hoste, Veronique and
Lenci, Alessandro and
Sakti, Sakriani and
Xue, Nianwen",
booktitle = "Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)",
month = may,
year = "2024",
address = "Torino, Italia",
publisher = "ELRA and ICCL",
    url = "https://aclanthology.org/2024.lrec-main.1294/",
pages = "14859--14870",
    abstract = "Natural language inference (NLI) is an important language understanding benchmark. This benchmark has two deficiencies: i) most NLI datasets exist only for English and a few other well-resourced languages, and ii) most NLI datasets are created with a narrow set of annotator instructions, allowing prediction models to pick up on linguistic clues instead of measuring true reasoning capability. We address both issues and introduce SI-NLI, the first dataset for Slovene natural language inference. The dataset is constructed from scratch by knowledgeable annotators following carefully crafted guidelines that aim to avoid problems commonly encountered in existing NLI datasets. We also manually translate SI-NLI to English to enable cross-lingual model training and evaluation. Using the newly created dataset and its translation, we train and evaluate a variety of large transformer language models in monolingual and cross-lingual settings. The results indicate that larger models generally achieve better performance. The qualitative analysis shows that the SI-NLI dataset is diverse and that there remains plenty of room for improvement even for the largest models."
}
Markdown (Informal)
[SI-NLI: A Slovene Natural Language Inference Dataset and Its Evaluation](https://aclanthology.org/2024.lrec-main.1294/) (Klemen et al., LREC-COLING 2024)