@inproceedings{chimoto-etal-2026-calibrating,
title = "Calibrating Beyond {E}nglish: Language Diversity for Better Quantized Multilingual {LLM}s",
author = "Chimoto, Everlyn Asiko and
Elhoushi, Mostafa and
Bassett, Bruce",
editor = "Demberg, Vera and
Inui, Kentaro and
Marquez, Llu{\'i}s",
booktitle = "Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 1: Long Papers)",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.223/",
pages = "4822--4838",
ISBN = "979-8-89176-380-7",
abstract = "Quantization is an effective technique for reducing the storage footprint and computational costs of Large Language Models (LLMs), but it often results in performance degradation. Existing post-training quantization methods typically use small, English-only calibration sets; however, their impact on multilingual models remains underexplored. We systematically evaluate eight calibration settings (five single-language and three multilingual mixes) across two quantizers (GPTQ, AWQ) on data from 10 different languages. Our findings reveal a consistent trend: non-English and multilingual calibration sets significantly improve perplexity compared to English-only baselines. Specifically, we observe notable average perplexity gains across both quantizers on Llama3.1 8B and Qwen2.5 7B, with multilingual mixes achieving the largest overall reductions of up to 3.52 perplexity gain. Furthermore, our analysis indicates that tailoring calibration sets to the evaluation language yields the largest improvements for individual languages, underscoring the importance of linguistic alignment. We also identify specific failure cases where certain language-quantizer combinations degrade performance, which we trace to differences in activation range distributions across languages. These results highlight that static, one-size-fits-all calibration is suboptimal, and that tailoring calibration data, both in language and diversity, plays a crucial role in robustly quantizing multilingual LLMs."
}
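The abstract's core recipe, swapping an English-only calibration set for a multilingual mix before post-training quantization, can be sketched with the Hugging Face `transformers` GPTQ path. This is a minimal illustration, not the paper's pipeline: the Wikipedia subsets, language mix, and sample counts below are assumptions, and AWQ or other quantizers would follow the same pattern through their own APIs.

```python
# Minimal sketch: GPTQ post-training quantization with a *multilingual*
# calibration set instead of the usual English-only default.
# Requires: pip install transformers optimum auto-gptq datasets
# NOTE: the language mix, dataset, and sample sizes are illustrative
# assumptions, not the paper's exact calibration setup.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_id = "meta-llama/Llama-3.1-8B"  # one of the models studied in the paper
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Build a mixed-language calibration set by streaming a few samples
# per language from Wikipedia dumps.
calib_texts = []
for lang in ["en", "de", "sw", "zh"]:  # illustrative language mix
    ds = load_dataset(
        "wikimedia/wikipedia", f"20231101.{lang}", split="train", streaming=True
    )
    calib_texts += [row["text"] for row, _ in zip(ds, range(32))]

# GPTQConfig accepts a list of raw strings as the calibration dataset.
gptq_config = GPTQConfig(bits=4, dataset=calib_texts, tokenizer=tokenizer)

# Quantization runs inside from_pretrained when a GPTQConfig is passed.
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", quantization_config=gptq_config
)
model.save_pretrained("llama3.1-8b-gptq-multilingual")
```

Restricting `calib_texts` to English-only samples reproduces the baseline setting the paper argues against.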