@inproceedings{xiao-etal-2025-stimulate,
title = "Stimulate the Critical Thinking of {LLM}s via Debiasing Discussion",
author = "Xiao, Ruiyu and
Wu, Lei and
Liu, Yuanxing and
Zhang, Weinan and
Liu, Ting",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.emnlp-main.579/",
doi = "10.18653/v1/2025.emnlp-main.579",
pages = "11490--11503",
ISBN = "979-8-89176-332-6",
    abstract = "Large language models (LLMs) often succumb to users' viewpoints when faced with conflicting perspectives. We identify two key biases underlying this issue: stance homogeneity bias and human preference bias. To address these biases, we propose a novel two-stage training framework: Multi-stance Discussion Sampling and Truth Alignment Training (MDTA). First, we introduce an equal multi-stance discussion framework to automatically generate multi-model discussion datasets. Based on this framework, we construct the first and largest multi-model fair discussion dataset named Eq-Discussion for supervised fine-tuning, reducing stance homogeneity bias. Second, we optimize Reinforcement Learning from Human Feedback (RLHF) to align with discussion correctness, mitigating human preference bias. Extensive experimental results demonstrate that MDTA effectively reduces both biases and significantly enhances the performance of LLMs across a variety of downstream tasks, including reading comprehension, logical reasoning, and social question answering. Furthermore, we observe that MDTA improves the generalization capabilities of LLMs, leading to substantial performance improvements in non-discussion scenarios and on out-of-domain datasets."
}