@inproceedings{de-wynter-2025-awes,
    title = "Awes, Laws, and Flaws From Today{'}s {LLM} Research",
    author = "de Wynter, Adrian",
    editor = "Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-acl.664/",
    doi = "10.18653/v1/2025.findings-acl.664",
    pages = "12834--12854",
    isbn = "979-8-89176-256-5",
    abstract = "We perform a critical examination of the scientific methodology behind contemporary large language model (LLM) research. For this we assess over 2,000 research works released between 2020 and 2024 based on criteria typical of what is considered good research (e.g. presence of statistical tests and reproducibility), and cross-validate it with arguments that are at the centre of controversy (e.g., claims of emergent behaviour). We find multiple trends, such as declines in ethics disclaimers, a rise of LLMs as evaluators, and an increase on claims of LLM reasoning abilities without leveraging human evaluation. We note that conference checklists are effective at curtailing some of these issues, but balancing velocity and rigour in research cannot solely rely on these. We tie all these findings to findings from recent meta-reviews and extend recommendations on how to address what does, does not, and should work in LLM research."
}
Markdown (Informal)
[Awes, Laws, and Flaws From Today’s LLM Research](https://aclanthology.org/2025.findings-acl.664/) (de Wynter, Findings 2025)
ACL