@inproceedings{mansurov-etal-2025-data,
title = "Data Laundering: Artificially Boosting Benchmark Results through Knowledge Distillation",
author = "Mansurov, Jonibek and
Sakip, Akhmed and
Aji, Alham Fikri",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.407/",
pages = "8332--8345",
ISBN = "979-8-89176-251-0",
abstract = "In this paper, we show that knowledge distillation can be subverted to manipulate language model benchmark scores, revealing a critical vulnerability in current evaluation practices. We introduce ``Data Laundering,'' a process that enables the covert transfer of benchmark-specific knowledge through seemingly legitimate intermediate training steps. Through extensive experiments with a 2-layer BERT student model, we show how this approach can achieve substantial improvements in benchmark accuracy (up to 75{\%} on GPQA) without developing genuine reasoning capabilities. Notably, this method can be exploited intentionally or even unintentionally, as researchers may inadvertently adopt this method and inflate scores without realising the implications. While our findings demonstrate the effectiveness of this technique, we present them as a cautionary tale highlighting the urgent need for more robust evaluation methods in AI. This work aims to contribute to the ongoing discussion about evaluation integrity in AI development and the need for benchmarks that more accurately reflect true model capabilities. The code is available at \url{https://github.com/mbzuai-nlp/data_laundering}."
}
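
For readers who want a concrete picture of the mechanism the abstract alludes to, below is a minimal, hypothetical sketch of a knowledge-distillation step in which a small 2-layer BERT student (as used in the paper's experiments) is trained on an intermediate dataset against the soft predictions of a teacher that has seen benchmark-related data. The hyperparameters (TEMPERATURE, ALPHA), the dummy batch, and the choice of `bert-base-uncased` are illustrative assumptions, not the authors' actual pipeline; the real implementation is in the linked repository.

```python
# Hedged sketch of temperature-scaled knowledge distillation (not the authors' code).
import torch
import torch.nn.functional as F
from transformers import BertConfig, BertForSequenceClassification, BertTokenizerFast

TEMPERATURE = 2.0   # softens the logit distributions (assumed value)
ALPHA = 0.5         # mixes hard-label loss with distillation loss (assumed value)

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")

# Teacher: any fine-tuned classifier; here an out-of-the-box stand-in for brevity.
teacher = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=4)
teacher.eval()

# Student: a 2-layer BERT, mirroring the student size reported in the paper.
student_cfg = BertConfig(num_hidden_layers=2, num_labels=4)
student = BertForSequenceClassification(student_cfg)
optimizer = torch.optim.AdamW(student.parameters(), lr=5e-5)

# One illustrative batch drawn from some seemingly legitimate intermediate dataset.
texts = ["example question one", "example question two"]
hard_labels = torch.tensor([0, 3])
batch = tokenizer(texts, padding=True, return_tensors="pt")

with torch.no_grad():
    teacher_logits = teacher(**batch).logits

student_logits = student(**batch).logits

# Distillation loss: KL divergence between temperature-softened distributions.
kd_loss = F.kl_div(
    F.log_softmax(student_logits / TEMPERATURE, dim=-1),
    F.softmax(teacher_logits / TEMPERATURE, dim=-1),
    reduction="batchmean",
) * (TEMPERATURE ** 2)
ce_loss = F.cross_entropy(student_logits, hard_labels)
loss = ALPHA * ce_loss + (1 - ALPHA) * kd_loss

loss.backward()
optimizer.step()
```

In this kind of setup, benchmark-specific knowledge held by the teacher can leak into the student through the soft targets even though the student never trains on benchmark data directly, which is the vulnerability the paper highlights.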