@inproceedings{hu-etal-2025-trimllm,
title = "{T}rim{LLM}: Progressive Layer Dropping for Domain-Specific {LLM}s",
author = "Hu, Lanxiang and
Rosing, Tajana and
Zhang, Hao",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.33/",
pages = "667--681",
ISBN = "979-8-89176-251-0",
abstract = "Specializing large language models (LLMs) for local deployment in domain-specific use cases is necessary for strong performance while meeting latency and privacy constraints. However, conventional task-specific adaptation approaches do not show simultaneous memory saving and inference speedup at deployment time. Practical compression techniques like quantization and pruning require dedicated hardware or kernel support to achieve measured inference speedup. We develop TrimLLM based on the layer-wise specialization phenomenon we empirically observed and verified on contemporary LLMs. TrimLLM reduces the depth of LLMs via progressive layer dropping. We show it retains LLMs' capacity in specific domains and achieves inference speedup irrespective of hardware and deep learning frameworks. We evaluated TrimLLM on LLMs of various sizes for inference; models adapted on medical, legal, and financial datasets all demonstrate $2.1 - 5.7\times$ inference speedup on consumer GPUs and up to $3.1\times$ speedup on A100 when compared to state-of-the-art model compression algorithms, with no loss in accuracy at $50\sim 60${\%} model compression ratio."
}
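As an illustrative aside, the abstract's core idea, shrinking an LLM's depth by dropping whole decoder layers so the speedup requires no special kernels or hardware support, can be sketched in a few lines of PyTorch. The sketch below is not the authors' implementation: the base checkpoint, the every-other-layer selection, and the `layer_idx` re-indexing are assumptions for demonstration, whereas TrimLLM drops layers progressively during domain adaptation based on the layer-wise specialization it observes.

```python
# A minimal sketch, assuming a LLaMA-style decoder exposed through Hugging Face
# transformers as model.model.layers (an nn.ModuleList). The choice of layers to
# keep is arbitrary here; it is NOT the paper's specialization-based criterion.
import torch
from torch import nn
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "meta-llama/Llama-2-7b-hf"  # hypothetical base model for illustration
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def drop_layers(model, keep_indices):
    """Keep only the decoder layers at keep_indices, reducing the model's depth."""
    kept = [model.model.layers[i] for i in sorted(keep_indices)]
    for new_idx, layer in enumerate(kept):
        # Re-index so KV-cache bookkeeping matches the shallower stack
        # (attribute name is an assumption based on current transformers code).
        if hasattr(layer, "self_attn") and hasattr(layer.self_attn, "layer_idx"):
            layer.self_attn.layer_idx = new_idx
    model.model.layers = nn.ModuleList(kept)
    model.config.num_hidden_layers = len(kept)
    return model

# Example: keep every other layer, i.e. roughly a 50% depth (and compute) reduction.
keep = range(0, model.config.num_hidden_layers, 2)
model = drop_layers(model, keep)
print(f"Remaining decoder layers: {model.config.num_hidden_layers}")

# The shallower model runs on stock PyTorch kernels, so any wall-clock gain
# appears on ordinary hardware, without custom sparse or quantized kernels.
inputs = tokenizer("A patient presents with chest pain and", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs, use_cache=False).logits
print(logits.shape)
```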