@inproceedings{qiao-etal-2025-ig,
title = "{IG}-Pruning: Input-Guided Block Pruning for Large Language Models",
author = "Qiao, Kangyu and
Zhang, Shaolei and
Feng, Yang",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.537/",
pages = "10629--10640",
ISBN = "979-8-89176-332-6",
abstract = "With the growing computational demands of large language models (LLMs), efficient inference has become increasingly critical for practical deployment. Depth pruning has emerged as a promising approach for reducing the computational costs of large language models by removing transformer layers. However, existing methods typically rely on fixed block masks, which can lead to suboptimal performance across different tasks and inputs. In this paper, we propose IG-Pruning, a novel input-aware block-wise pruning method that dynamically selects layer masks at inference time. Our approach consists of two stages: (1) Discovering diverse mask candidates through semantic clustering and L0 optimization, and (2) Implementing efficient dynamic pruning without the need for extensive training. Experimental results demonstrate that our method consistently outperforms state-of-the-art static depth pruning methods, making it particularly suitable for resource-constrained deployment scenarios."
}