@inproceedings{chang-etal-2025-inputs,
title = "Why Do Some Inputs Break Low-Bit {LLM} Quantization?",
author = "Chang, Ting-Yun and
Zhang, Muru and
Thomason, Jesse and
Jia, Robin",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.168/",
pages = "3410--3429",
ISBN = "979-8-89176-332-6",
abstract = "Low-bit weight-only quantization significantly reduces the memory footprint of large language models (LLMs), but disproportionately affects certain examples. We analyze diverse 3-4 bit methods on LLMs ranging from 7B-70B in size and find that the quantization errors of 50 pairs of methods are strongly correlated (avg. $\rho = 0.82$) on FineWeb examples. Moreover, the residual stream magnitudes of full-precision models are indicative of future quantization errors. We further establish a hypothesis that relates the residual stream magnitudes to error amplification and accumulation over layers. Using LLM localization techniques, early exiting, and activation patching, we show that examples with large errors rely on precise residual activations in the late layers, and that the outputs of MLP gates play a crucial role in maintaining the perplexity. Our work reveals why certain examples result in large quantization errors and which model components are most critical for performance preservation."
}

Markdown (Informal)
[Why Do Some Inputs Break Low-Bit LLM Quantization?](https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.168/) (Chang et al., EMNLP 2025)
ACL
Ting-Yun Chang, Muru Zhang, Jesse Thomason, and Robin Jia. 2025. Why Do Some Inputs Break Low-Bit LLM Quantization?. In Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing, pages 3410–3429, Suzhou, China. Association for Computational Linguistics.