@inproceedings{kong-etal-2025-neuron,
title = "Neuron Activation Modulation for Text Style Transfer: Guiding Large Language Models",
author = "Kong, Chaona and
Liu, Jianyi and
Tang, Yifan and
Zhang, Ru",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.findings-acl.403/",
pages = "7735--7747",
ISBN = "979-8-89176-256-5",
abstract = "Text style transfer (TST) aims to flexibly adjust the style of text while preserving its core content. Although large language models (LLMs) excel in TST tasks, they often face unidirectional issues due to imbalanced training data and their tendency to generate safer responses. These challenges present a significant obstacle in achieving effective style transfer. To address this issue, we propose a novel method for text style transfer based on neuron activation modulation (NAM-TST). This approach identifies neurons related to style through gradient-based activation difference analysis and calculates the activation differences between the source and target styles. During text generation, we use the activation difference to align the activation values of style-related neurons with those of the target style to guide the model in performing the transfer. This strategy enables the model to generate text that satisfies specific style requirements, effectively mitigating the unidirectional issue inherent in LLMs during style transfer. Experiments on benchmark datasets demonstrate that NAM-TST significantly enhances style transfer quality while preserving content consistency."
}
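The abstract describes steering style-related neurons by shifting their activations toward the target style during generation. The sketch below is a minimal, hypothetical illustration of that general idea, not the authors' released code: it assumes GPT-2 via Hugging Face transformers, an arbitrarily chosen layer, toy one-sentence style corpora, and a simple top-k activation-difference filter as a crude stand-in for the paper's gradient-based neuron identification. The helper name `mean_mlp_activation` is invented for this example.

```python
# Hedged sketch of neuron activation modulation for style transfer.
# Assumptions (not from the paper): gpt2, layer 6, top-k neuron selection,
# tiny toy corpora. Real use would need a proper style corpus and the
# paper's gradient-based identification of style neurons.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2").eval()

LAYER = 6  # hypothetical layer to modulate


def mean_mlp_activation(texts, layer):
    """Average post-activation MLP values over a set of same-style texts."""
    acts = []

    def hook(module, inputs, output):
        acts.append(output.mean(dim=1))  # mean over sequence positions

    handle = model.transformer.h[layer].mlp.act.register_forward_hook(hook)
    with torch.no_grad():
        for t in texts:
            model(**tok(t, return_tensors="pt"))
    handle.remove()
    return torch.cat(acts).mean(dim=0)


# Toy stand-ins for source-style and target-style corpora.
source_texts = ["this movie was awful and boring."]
target_texts = ["this movie was wonderful and delightful."]

src = mean_mlp_activation(source_texts, LAYER)
tgt = mean_mlp_activation(target_texts, LAYER)
delta = tgt - src  # activation difference between styles

# Keep only the neurons with the largest style activation difference
# (a crude proxy for the paper's gradient-based selection).
idx = delta.abs().topk(32).indices


def steer_hook(module, inputs, output):
    # Shift the selected neurons' activations toward the target style.
    out = output.clone()
    out[..., idx] += delta[idx]
    return out


handle = model.transformer.h[LAYER].mlp.act.register_forward_hook(steer_hook)
ids = tok("Rewrite: this movie was awful.", return_tensors="pt")
print(tok.decode(model.generate(**ids, max_new_tokens=30)[0]))
handle.remove()
```

With the hook installed, every forward pass during generation nudges the selected neurons by the source-to-target activation difference, so decoding is biased toward the target style without any fine-tuning.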