@inproceedings{liu-etal-2025-user,
title = "User Feedback in Human-{LLM} Dialogues: A Lens to Understand Users But Noisy as a Learning Signal",
author = "Liu, Yuhan and
Zhang, Michael JQ and
Choi, Eunsol",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Ros{\'e}, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.133/",
pages = "2666--2681",
ISBN = "979-8-89176-332-6",
abstract = "Once language models (LMs) are deployed, they can interact with users long-term, ideally evolving based on their feedback. Asking for direct user feedback can be disruptive; thus, we study harvesting implicit user feedback from user-LM interaction logs. We study two user-LM interaction datasets (WildChat and LMSYS). First, we analyze user feedback in the user-LLM conversation logs, providing insights into when and why such feedback occurs. Second, we study harvesting learning signals from such implicit user feedback. Specifically, we study whether incorporating the contents of user feedback (e.g., user wanted clarification), in addition to the polarity of the feedback, can improve the model performance. We observe mixed results, showing this helps in short human-designed questions (MTBench) but not on longer and more complex questions (WildBench). Together, we provide an in-depth study of implicit user feedback, showing its potential and limitations."
}