@inproceedings{das-etal-2025-recover,
title = "Recover-{L}o{RA}: Data-Free Accuracy Recovery of Degraded Language Models via Low-Rank Adaptation",
author = "Das, Devleena and
Patwari, Rajeev and
Sirasao, Ashish",
editor = "Potdar, Saloni and
Rojas-Barahona, Lina and
Montella, Sebastien",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing: Industry Track",
month = nov,
year = "2025",
address = "Suzhou (China)",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-industry.164/",
pages = "2377--2386",
ISBN = "979-8-89176-333-3",
abstract = "Inference optimizations such as quantization, pruning, format and datatype conversion, model export, and serialization can lead to functional degradations in language model task performance. While most efforts on performance recovery for deployment focus on robust quantization techniques, we focus on recovering model accuracies from any sources that degrade model weights, such as improper model serialization. In this work, we propose Recover-LoRA, a lightweight and dataset agnostic method to recover accuracy in degraded models. Recover-LoRA uses synthetic data and logit distillation to learn LoRA adapters on selective layers that facilitate aligning the degraded model to its full precision model. We investigate the utility of Recover-LoRA across a diverse set of small language models (SLMs), including models with varying attention architectures, multi-head attention (MHA) and group-query attention (GQA), as well as several evaluation datasets. Our results show that Recover-LoRA recovers model accuracies by 5-17{\%} on MHA and GQA SLMs."
}