@inproceedings{wecker-etal-2020-clusterdatasplit,
title = "{C}luster{D}ata{S}plit: Exploring Challenging Clustering-Based Data Splits for Model Performance Evaluation",
author = "Wecker, Hanna and
Friedrich, Annemarie and
Adel, Heike",
editor = "Eger, Steffen and
Gao, Yang and
Peyrard, Maxime and
Zhao, Wei and
Hovy, Eduard",
booktitle = "Proceedings of the First Workshop on Evaluation and Comparison of NLP Systems",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2020.eval4nlp-1.15/",
doi = "10.18653/v1/2020.eval4nlp-1.15",
pages = "155--163",
abstract = "This paper adds to the ongoing discussion in the natural language processing community on how to choose a good development set. Motivated by the real-life necessity of applying machine learning models to different data distributions, we propose a clustering-based data splitting algorithm. It creates development (or test) sets which are lexically different from the training data while ensuring similar label distributions. Hence, we are able to create challenging cross-validation evaluation setups while abstracting away from performance differences resulting from label distribution shifts between training and test data. In addition, we present a Python-based tool for analyzing and visualizing data split characteristics and model performance. We illustrate the workings and results of our approach using a sentiment analysis and a patent classification task."
}
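
The abstract describes a clustering-based split: development data is drawn from lexical clusters disjoint from the training clusters, while label distributions are kept comparable. The sketch below is a minimal illustration of that general idea, not the authors' released tool; the TF-IDF features, KMeans clustering, and the greedy cluster assignment are assumptions made for this example.

```python
# Minimal sketch of a clustering-based data split (illustrative only):
# cluster documents on lexical features, hold out whole clusters as the
# development set, and report the resulting label distributions.
from collections import Counter

import numpy as np
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer


def cluster_based_split(texts, labels, n_clusters=10, dev_fraction=0.2, seed=0):
    """Hold out whole lexical clusters as a development set."""
    # Lexical representation: TF-IDF over word unigrams (an assumption;
    # the paper may use a different representation).
    X = TfidfVectorizer(min_df=2).fit_transform(texts)
    clusters = KMeans(n_clusters=n_clusters, random_state=seed, n_init=10).fit_predict(X)

    # Greedily add clusters to the dev set until the target size is reached.
    rng = np.random.default_rng(seed)
    order = rng.permutation(n_clusters)
    dev_clusters, dev_size = set(), 0
    target = dev_fraction * len(texts)
    for c in order:
        if dev_size >= target:
            break
        dev_clusters.add(c)
        dev_size += int(np.sum(clusters == c))

    dev_idx = [i for i, c in enumerate(clusters) if c in dev_clusters]
    train_idx = [i for i, c in enumerate(clusters) if c not in dev_clusters]

    # Compare label distributions; a full splitter would swap clusters
    # until the divergence between the two distributions is small enough.
    def label_dist(idx):
        counts = Counter(labels[i] for i in idx)
        total = sum(counts.values())
        return {k: v / total for k, v in sorted(counts.items())}

    return train_idx, dev_idx, label_dist(train_idx), label_dist(dev_idx)
```

A caller would pass a list of document strings and their labels, then inspect the returned label distributions to judge whether the held-out clusters are acceptably balanced before using the split for cross-validation.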