@inproceedings{klein-nabi-2025-contrastive,
title = "Contrastive Perplexity for Controlled Generation: An Application in Detoxifying Large Language Models",
author = "Klein, Tassilo and
Nabi, Moin",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.125/",
pages = "2493--2508",
ISBN = "979-8-89176-251-0",
abstract = "The generation of toxic content by large language models (LLMs) remains a critical challenge for the safe deployment of language technology. We propose a novel framework for implicit knowledge editing and controlled text generation by fine-tuning LLMs with a prototype-based contrastive perplexity objective. Central to our method is the construction of hard negatives{---}toxic outputs that are generated through adversarial paraphrasing to be semantically similar and model probability to their non-toxic counterparts. By training on these challenging and realistic pairs, our approach ensures robust and stable contrastive optimization. Experimental results in the domain of detoxification demonstrate that our method significantly reduces toxic generation while maintaining strong performance on downstream tasks such as commonsense reasoning and reading comprehension. Our findings highlight the effectiveness of exploiting hard negatives for attribute-aware fine-tuning."
}