@inproceedings{zhou-etal-2025-revisiting,
title = "Revisiting Pruning vs Quantization for Small Language Models",
author = "Zhou, Zihan and
Kurz, Simon and
Zhao, Zhixue",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.645/",
doi = "10.18653/v1/2025.findings-emnlp.645",
pages = "12055--12070",
ISBN = "979-8-89176-335-7",
abstract = "Deploying language models on resource-constrained devices, such as mobile phones, wearables, and on-device AI assistants, demands compact, efficient models without sacrificing performance. Compressing Small Language Models (SLMs) is particularly suited for these scenarios, yet their compression dynamics remain underexplored compared to Large Language Models (LLMs). We systematically evaluate leading post-training pruning (SparseGPT, Wanda) and quantization (GPTQ, AWQ) methods across six SLMs from 0.5 to 3.8B, seven languages, and seven downstream tasks. Our results show that quantization consistently outperforms pruning in preserving model fidelity, multilingual perplexity, and reasoning accuracy. However, quantization{'}s advantages diminish on complex knowledge and reasoning tasks like OpenBookQA, highlighting a disconnect between compression fidelity and downstream task performance. Notably, trends observed in LLMs (e.g., Wanda{'}s competitive performance to SparseGPT) do not generalize to SLMs. For practitioners, we recommend prioritizing quantization (particularly AWQ) for SLM compression and caution against relying on a single metric."
}