@inproceedings{zhao-etal-2025-debiasing,
    title     = "Debiasing the Fine-Grained Classification Task in {LLM}s with Bias-Aware {PEFT}",
    author    = "Zhao, Daiying and
                 Yang, Xinyu and
                 Chen, Hang",
    editor    = "Che, Wanxiang and
                 Nabende, Joyce and
                 Shutova, Ekaterina and
                 Pilehvar, Mohammad Taher",
    booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month     = jul,
    year      = "2025",
    address   = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2025.acl-long.717/",
    pages     = "14731--14746",
    isbn      = "979-8-89176-251-0",
    abstract  = "Fine-grained classification via LLMs is susceptible to more complex label biases compared to traditional classification tasks. Existing bias mitigation strategies, such as retraining, post-hoc adjustment, and parameter-efficient fine-tuning (PEFT) are primarily effective for simple classification biases, such as stereotypes, but fail to adequately address prediction propensity and discriminative ability biases. In this paper, we analyze these two bias phenomena and observe their progressive accumulation from intermediate to deeper layers within LLMs. To mitigate this issue, we propose a bias-aware optimization framework that incorporates two distinct label balance constraints with a PEFT strategy targeting an intermediate layer. Our approach adjusts less than 1{\%} of the model{'}s parameters while effectively curbing bias amplification in deeper layers. Extensive experiments conducted across 12 datasets and 5 LLMs demonstrate that our method consistently outperforms or matches the performance of full-parameter fine-tuning and LoRA, achieving superior results with lower perplexity."
}
@comment{Markdown (Informal):
[Debiasing the Fine-Grained Classification Task in LLMs with Bias-Aware PEFT](https://aclanthology.org/2025.acl-long.717/) (Zhao et al., ACL 2025)
}