@inproceedings{ye-etal-2025-generic,
title = "From Generic Empathy to Personalized Emotional Support: A Self-Evolution Framework for User Preference Alignment",
author = "Ye, Jing and
Xiang, Lu and
Zhang, Yaping and
Zong, Chengqing",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.1024/",
doi = "10.18653/v1/2025.findings-emnlp.1024",
pages = "18826--18853",
ISBN = "979-8-89176-335-7",
abstract = "Effective emotional support hinges on understanding users' emotions and needs to provide meaningful comfort during multi-turn interactions. Large Language Models (LLMs) show great potential for expressing empathy; however, they often deliver generic responses that fail to address users' specific needs. To tackle this issue, we propose a self-evolution framework designed to help LLMs improve their responses to better align with users' implicit preferences concerning personality, emotional state, and specific context. Our framework consists of two distinct phases: \textit{(1)} \textit{Emotional Support Experience Acquisition}, where LLMs are fine-tuned on limited emotional support conversation data to provide basic support, and \textit{(2)} \textit{Self-Improvement for Personalized Emotional Support}, where LLMs leverage self-reflection and self-refinement to generate personalized responses. Through iterative direct preference optimization between the pre- and post-refined responses, our model generates responses that reflect a better understanding of the user{'}s implicit preferences. Extensive experiments and evaluations demonstrate that our method significantly enhances the model{'}s performance in emotional support, reducing unhelpful responses and minimizing discrepancies between user preferences and model outputs."
}
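The abstract's second phase pairs each pre-refined response with its self-refined counterpart and optimizes the model with iterative direct preference optimization (DPO). Below is a minimal, self-contained sketch of that pairing scheme and of the standard DPO loss; `generate` and `reflect_and_refine` are hypothetical stand-ins for the fine-tuned model's generation and self-refinement calls, and the loss shown is the generic DPO objective, not code from the paper.

```python
import math

def build_preference_pairs(contexts, generate, reflect_and_refine):
    """One self-improvement round, following the abstract's pairing scheme:
    the post-refined response is 'chosen', the pre-refined one 'rejected'."""
    pairs = []
    for context in contexts:
        draft = generate(context)                     # pre-refined response
        refined = reflect_and_refine(context, draft)  # self-reflection + self-refinement
        pairs.append({"prompt": context, "chosen": refined, "rejected": draft})
    return pairs

def dpo_loss(pi_chosen_logp, pi_rejected_logp,
             ref_chosen_logp, ref_rejected_logp, beta=0.1):
    """Standard DPO loss for one pair: -log sigmoid(beta * margin), where the
    margin compares policy-vs-reference log-ratios of chosen and rejected."""
    margin = ((pi_chosen_logp - ref_chosen_logp)
              - (pi_rejected_logp - ref_rejected_logp))
    return -math.log(1.0 / (1.0 + math.exp(-beta * margin)))

# Toy check: a policy that already slightly prefers the refined response.
# margin = 1.5, beta * margin = 0.15 -> loss ~ 0.621, below log 2 ~ 0.693
# (the value at zero margin).
print(dpo_loss(-12.0, -15.0, -13.0, -14.5))
```

In the iterative setting the abstract describes, each round's pairs feed a DPO update, and the updated model then seeds the next round's generation and refinement.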