@inproceedings{longkai-etal-2025-hookmoe,
title = "{H}ook{M}o{E}: A learnable performance compensation strategy of Mixture-of-Experts for {LLM} inference acceleration",
author = "Longkai, Cheng and
He, Along and
Li, Mulin and
Xueshuo, Xie and
Li, Tao",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1610/",
pages = "31582--31594",
ISBN = "979-8-89176-332-6",
    abstract = "Mixture of Experts (MoE) architectures have emerged as a promising paradigm for scaling model capacity through top-$k$ routing mechanisms. Although reducing the number of activated experts inherently enables inference acceleration, this efficiency gain typically comes at the cost of significant performance degradation. To address this trade-off between efficiency and performance, we propose HookMoE, a plug-and-play single-layer compensation framework that effectively restores performance using only a small post-training calibration set. Our method strategically inserts a lightweight trainable Hook module immediately preceding selected transformer blocks. In comprehensive evaluations on four popular MoE models, our method reduces the number of activated experts by more than 50{\%} and achieves a 1.42$\times$ inference speed-up during the prefill stage, with an average performance degradation of only 2.5{\%} across various benchmarks. Through systematic analysis, we further reveal that the upper layers require fewer active experts, offering actionable insights for refining dynamic expert selection strategies and enhancing the overall efficiency of MoE models."
}