@inproceedings{li-etal-2025-learning-committee,
title = "Learning from Committee: Reasoning Distillation from a Mixture of Teachers with Peer-Review",
author = "Li, Zhuochun and
Ji, Yuelyu and
Meng, Rui and
He, Daqing",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/display_plenaries/2025.findings-acl.217/",
pages = "4190--4205",
ISBN = "979-8-89176-256-5",
abstract = "While reasoning capabilities typically emerge in large language models (LLMs) with tens of billions of parameters, recent research focuses on improving smaller open-source models through knowledge distillation (KD) from commercial LLMs. However, many of these studies rely solely on responses from a single LLM as the gold rationale, unlike the natural human learning process, which involves understanding both the correct answers and the reasons behind mistakes. In this paper, we introduce a novel Fault-Aware DistIllation via Peer-Review (FAIR) approach: 1) instead of merely obtaining rationales from teachers, our method asks teachers to identify and explain the student{'}s mistakes, providing customized instruction learning data; 2) we design a simulated peer-review process between teacher LLMs, and selects only the generated rationales above the acceptance threshold, which reduces the chance of teachers guessing correctly with flawed rationale, improving instructional data quality. Comprehensive experiments and analysis on mathematical, commonsense, and logical reasoning tasks demonstrate the effectiveness of our method. Our code is available at https://github.com/zhuochunli/Learn-from-Committee."
}