@inproceedings{zhou-etal-2025-debate,
title = "Debate, Reflect, and Distill: Multi-Agent Feedback with Tree-Structured Preference Optimization for Efficient Language Model Enhancement",
author = "Zhou, Xiaofeng and
Huang, Heyan and
Liao, Lizi",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-acl.475/",
doi = "10.18653/v1/2025.findings-acl.475",
pages = "9122--9137",
ISBN = "979-8-89176-256-5",
abstract = "Large Language Models (LLMs) continue to set new standards in knowledge-intensive and complex reasoning tasks, yet their high computational demands limit widespread adoption. While distilling large models into smaller ones offers a sustainable solution, current techniques{---}such as static knowledge distillation, resource-intensive reinforcement learning from human feedback, or limited self-reflection{---}struggle to yield substantial and lasting performance gains. In this paper, we present a novel Debate and Reflect (D{\&}R) framework that orchestrates multi-turn debates between smaller models and stronger teacher models, eliciting actionable feedback (e.g., error analysis, corrective strategies) to guide student models. Further, we introduce Tree-structured Direct Preference Optimization (T-DPO) to efficiently leverage these debate logs, organizing interactions into a hierarchical format for effective training. Empirical evaluations across diverse NLP benchmarks demonstrate that our approach significantly improves smaller-model accuracy, robustness, and generalization, outperforming conventional baselines by a large margin."
}
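
For context on the objective named in the abstract: Tree-structured Direct Preference Optimization (T-DPO) builds on standard Direct Preference Optimization (DPO; Rafailov et al., 2023). As a point of reference only, and not a description of the paper's tree-structured variant, the base DPO loss for a policy $\pi_\theta$ trained against a frozen reference $\pi_{\mathrm{ref}}$ on preference triples $(x, y_w, y_l)$ is

\[
\mathcal{L}_{\mathrm{DPO}}(\pi_\theta;\pi_{\mathrm{ref}})
= -\,\mathbb{E}_{(x,\,y_w,\,y_l)\sim\mathcal{D}}
\left[\log\sigma\!\left(
\beta\log\frac{\pi_\theta(y_w\mid x)}{\pi_{\mathrm{ref}}(y_w\mid x)}
-\beta\log\frac{\pi_\theta(y_l\mid x)}{\pi_{\mathrm{ref}}(y_l\mid x)}
\right)\right],
\]

where $y_w$ and $y_l$ are the preferred and dispreferred responses, $\beta$ controls how far the policy may drift from the reference model, and $\sigma$ is the logistic function. How the debate logs are organized into a hierarchy of such preference pairs is specific to the paper itself.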