@inproceedings{subramani-etal-2025-simba,
title = "{S}im{BA}: Simplifying Benchmark Analysis Using Performance Matrices Alone",
author = "Subramani, Nishant and
Gomez, Alfredo and
Diab, Mona T.",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.711/",
doi = "10.18653/v1/2025.findings-emnlp.711",
pages = "13220--13233",
ISBN = "979-8-89176-335-7",
abstract = "Modern language models are evaluated on large benchmarks, which are difficult to make sense of, especially for model selection.Looking at the raw evaluation numbers themselves using a model-centric lens, we propose SimBA, a three phase framework to Simplify Benchmark Analysis. The three phases of SimBA are: stalk, where we conduct dataset {\&} model comparisons, prowl, where we discover a representative subset, and pounce, where we use the representative subset to predict performance on a held-out set of models. Applying SimBA to three popular LM benchmarks: HELM, MMLU, and BigBenchLite reveals that across all three benchmarks, datasets and models relate strongly to one another (stalk). We develop an representative set discovery algorithm which covers a benchmark using raw evaluation scores alone. Using our algorithm, we find that with 6.25{\%} (1/16), 1.7{\%} (1/58), and 28.4{\%} (21/74) of the datasets for HELM, MMLU, and BigBenchLite respectively, we achieve coverage levels of at least 95{\%} (prowl). Additionally, using just these representative subsets, we can both preserve model ranks and predict performance on a held-out set of models with near zero mean-squared error (pounce). Taken together, SimBA can help model developers improve efficiency during model training and dataset creators validate whether their newly created dataset differs from existing datasets in a benchmark. Our code is open source, available at https://github.com/nishantsubramani/simba."
}
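The prowl phase described in the abstract, discovering a small representative subset of datasets from the raw performance matrix alone, can be illustrated with a minimal greedy sketch. This is an assumption-laden illustration, not the paper's published algorithm: here "coverage" is assumed to mean the Spearman rank correlation between per-model mean scores on the subset and on the full benchmark, and the greedy criterion, threshold, and function names are all hypothetical.

```python
# Hypothetical sketch of greedy representative-subset discovery from a
# performance matrix (models x datasets). NOT the published SimBA algorithm;
# "coverage" is assumed here to be the Spearman rank correlation between
# mean subset scores and mean full-benchmark scores per model.
import numpy as np
from scipy.stats import spearmanr

def greedy_representative_subset(perf, coverage=0.95):
    """perf: (n_models, n_datasets) array of raw evaluation scores."""
    n_models, n_datasets = perf.shape
    full_mean = perf.mean(axis=1)          # per-model benchmark average
    chosen = []                            # indices of selected datasets
    remaining = set(range(n_datasets))
    best_rho = -1.0
    while remaining and best_rho < coverage:
        best_d, best_rho = None, -1.0
        for d in remaining:
            cand = chosen + [d]
            # Rank agreement between the candidate subset and the full benchmark.
            rho, _ = spearmanr(perf[:, cand].mean(axis=1), full_mean)
            if rho > best_rho:
                best_d, best_rho = d, rho
        chosen.append(best_d)
        remaining.remove(best_d)
    return chosen, best_rho

# Toy usage: 10 models evaluated on 8 datasets.
rng = np.random.default_rng(0)
scores = rng.random((10, 8))
subset, rho = greedy_representative_subset(scores)
print(f"representative datasets: {subset}, coverage (rank corr.): {rho:.3f}")
```

Under this sketch, the pounce phase would then fit a simple regressor from subset scores to full-benchmark scores on held-in models and apply it to held-out ones; the paper reports near-zero mean-squared error for its version of that step.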