@inproceedings{calamai-etal-2025-benchmarking,
title = "Benchmarking the Benchmarks: Reproducing Climate-Related {NLP} Tasks",
author = "Calamai, Tom and
Balalau, Oana and
Suchanek, Fabian M.",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-acl.925/",
pages = "17967--18009",
ISBN = "979-8-89176-256-5",
    abstract = "Significant efforts have been made in the NLP community to facilitate the automatic analysis of climate-related corpora through tasks such as climate-related topic detection, climate risk classification, question answering over climate topics, and many more. In this work, we perform a reproducibility study on 8 tasks and 29 datasets, testing 6 models. We find that many tasks rely heavily on surface-level keyword patterns rather than deeper semantic or contextual understanding. Moreover, we find that 96{\%} of the datasets contain annotation issues: of the sampled wrong predictions of a zero-shot classifier, 16.6{\%} are in fact clear annotation mistakes and 38.8{\%} are ambiguous examples. These results call into question the reliability of current benchmarks to meaningfully compare models and highlight the need for improved annotation practices. We conclude by outlining actionable recommendations to enhance dataset quality and evaluation robustness."
}