@inproceedings{kambhatla-etal-2023-quantifying,
  title     = {Quantifying Train-Evaluation Overlap with Nearest Neighbors},
  author    = {Kambhatla, Gauri and
               Nguyen, Thuy and
               Choi, Eunsol},
  editor    = {Rogers, Anna and
               Boyd-Graber, Jordan and
               Okazaki, Naoaki},
  booktitle = {Findings of the Association for Computational Linguistics: ACL 2023},
  month     = jul,
  year      = {2023},
  address   = {Toronto, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.findings-acl.183/},
  doi       = {10.18653/v1/2023.findings-acl.183},
  pages     = {2905--2920},
  abstract  = {Characterizing benchmark datasets is crucial to interpreting model performance. In this work, we study train-evaluation overlap as a measure of an individual dataset's adequacy to evaluate model generalization over a wide range of datasets. We quantify the overlap with a simple novel metric based on a nearest neighbors approach between the training and evaluation sets. We identify nearest training examples for each evaluation example by mapping instances with generic and task-specific embedding methods. Our study on eleven classification and extractive QA tasks reveals a wide range of train-evaluation overlap, and we show that the data collection method of the dataset and the difficulty of the task may play a role in the amount of overlap. Lastly, we use our nearest neighbor analysis to identify challenging or potentially mislabeled examples. Our analysis quantifies train-evaluation overlap, providing insights for constructing datasets to study generalization.},
}
Markdown (Informal)
[Quantifying Train-Evaluation Overlap with Nearest Neighbors](https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.findings-acl.183/) (Kambhatla et al., Findings 2023)
ACL