@inproceedings{xu-etal-2025-beyond,
title = "Beyond Single Labels: Improving Conversational Recommendation through {LLM}-Powered Data Augmentation",
author = "Xu, Haozhe and
Wang, Xiaohua and
Lv, Changze and
Zheng, Xiaoqing",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.758/",
pages = "15573--15590",
ISBN = "979-8-89176-251-0",
abstract = "Conversational recommender systems (CRSs) enhance recommendation quality by engaging users in multi-turn dialogues, capturing nuanced preferences through natural language interactions. However, these systems often face the false negative issue, where items that a user might like are incorrectly labeled as negative during training, leading to suboptimal recommendations. Expanding the label set through data augmentation presents an intuitive solution but faces the challenge of balancing two key aspects: ensuring semantic relevance and preserving the collaborative information inherent in CRS datasets. To address these issues, we propose a novel data augmentation framework that first leverages an LLM-based semantic retriever to identify diverse and semantically relevant items, which are then filtered by a relevance scorer to remove noisy candidates. Building on this, we introduce a two-stage training strategy balancing semantic relevance and collaborative information. Extensive experiments on two benchmark datasets and user simulators demonstrate significant and consistent performance improvements across various recommenders, highlighting the effectiveness of our approach in advancing CRS performance."
}
Markdown (Informal)
[Beyond Single Labels: Improving Conversational Recommendation through LLM-Powered Data Augmentation](https://aclanthology.org/2025.acl-long.758/) (Xu et al., ACL 2025)
ACL
Haozhe Xu, Xiaohua Wang, Changze Lv, and Xiaoqing Zheng. 2025. Beyond Single Labels: Improving Conversational Recommendation through LLM-Powered Data Augmentation. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15573–15590, Vienna, Austria. Association for Computational Linguistics.
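The abstract describes a two-step augmentation pipeline: an LLM-based semantic retriever proposes candidate items, a relevance scorer filters out noisy candidates, and the surviving items expand the single gold label before training. Below is a minimal sketch of that retrieve-then-filter shape, assuming cosine similarity over precomputed embeddings as the retriever score and a plain threshold in place of the paper's learned relevance scorer; all names (augment_labels, threshold, etc.) are illustrative and not the authors' code.

```python
import numpy as np

def augment_labels(dialogue_emb: np.ndarray,
                   item_embs: np.ndarray,
                   item_ids: list[str],
                   gold_ids: set[str],
                   top_k: int = 20,
                   threshold: float = 0.8) -> list[str]:
    """Expand a conversation's gold label set with semantically similar items.

    Step 1 (retrieval): rank catalog items by cosine similarity between the
    dialogue embedding and each item embedding, keeping the top-k.
    Step 2 (filtering): drop candidates whose score falls below a threshold,
    a stand-in for the paper's learned relevance scorer.
    """
    # Cosine similarity of the dialogue against every catalog item.
    sims = item_embs @ dialogue_emb
    sims /= (np.linalg.norm(item_embs, axis=1)
             * np.linalg.norm(dialogue_emb) + 1e-9)

    # Step 1: top-k semantic retrieval.
    top = np.argsort(-sims)[:top_k]

    # Step 2: relevance filtering.
    kept = {item_ids[i] for i in top if sims[i] >= threshold}

    # Augmented label set: original gold labels plus filtered candidates.
    return sorted(gold_ids | kept)

# Toy usage with random embeddings (illustrative only).
rng = np.random.default_rng(0)
items = [f"movie_{i}" for i in range(100)]
embs = rng.normal(size=(100, 64))
dialogue = embs[3] + 0.1 * rng.normal(size=64)  # dialogue "close to" movie_3
print(augment_labels(dialogue, embs, items, {"movie_7"}, top_k=5, threshold=0.5))
```

The paper's two-stage training strategy, which balances the semantic labels added here against the collaborative signal in the original dataset, is a separate training-time component not captured by this sketch.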