@inproceedings{mehri-etal-2026-beyond,
  title     = {Beyond Sample-Level Feedback: Using Reference-Level Feedback to Guide Data Synthesis},
  author    = {Mehri, Shuhaib and
               Chen, Xiusi and
               Ji, Heng and
               Hakkani-T{\"u}r, Dilek},
  editor    = {Demberg, Vera and
               Inui, Kentaro and
               Marquez, Llu{\'i}s},
  booktitle = {Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 1: Long Papers)},
  month     = mar,
  year      = {2026},
  address   = {Rabat, Morocco},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2026.eacl-long.7/},
  pages     = {141--164},
  isbn      = {979-8-89176-380-7},
  abstract  = {High-quality instruction-tuning data is crucial for developing Large Language Models (LLMs) that can effectively navigate real-world tasks and follow human instructions. While synthetic data generation offers a scalable approach for creating such datasets, it imposes a quality ceiling where models trained on the data cannot outperform the LLM generating it. To overcome this limitation, we introduce Reference-Level Feedback, a paradigm that extracts desirable characteristics from carefully curated reference samples to guide the synthesis of higher-quality instruction-response pairs. Using this approach, we synthesize REFED, a dataset of 10K instruction-response pairs. Fine-tuning Llama-3.1-8B-Instruct and Mistral-7B-Instruct on REFED demonstrate state-of-the-art performance among similarly sized models, notably reaching a 43.96{\%} length-controlled win-rate on AlpacaEval 2.0. Extensive experiments demonstrate that Reference-Level Feedback consistently outperforms traditional sample-level feedback methods, generalizes across model architectures, and produces high-quality and diverse data at low cost.},
}
Markdown (Informal)
[Beyond Sample-Level Feedback: Using Reference-Level Feedback to Guide Data Synthesis](https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.7/) (Mehri et al., EACL 2026)
ACL