@inproceedings{moon-etal-2025-call,
title = "Call for Rigor in Reporting Quality of Instruction Tuning Data",
author = "Moon, Hyeonseok and
Seo, Jaehyung and
Lim, Heuiseok",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/transition-to-people-yaml/2025.acl-short.9/",
doi = "10.18653/v1/2025.acl-short.9",
pages = "100--109",
ISBN = "979-8-89176-252-7",
abstract = "Instruction tuning is crucial for adapting large language models (LLMs) to align with user intentions. Numerous studies emphasize the significance of the quality of instruction tuning (IT) data, revealing a strong correlation between IT data quality and the alignment performance of LLMs. In these studies, the quality of IT data is typically assessed by evaluating the performance of LLMs trained with that data. However, we identified a prevalent issue in such practice: hyperparameters for training models are often selected arbitrarily without adequate justification. We observed significant variations in hyperparameters applied across different studies, even when training the same model with the same data. In this study, we demonstrate the potential problems arising from this practice and emphasize the need for careful consideration in verifying data quality. Through our experiments on the quality of LIMA data and a selected set of 1,000 Alpaca data points, we demonstrate that arbitrary hyperparameter decisions can make any arbitrary conclusion."
}
[Call for Rigor in Reporting Quality of Instruction Tuning Data](https://aclanthology.org/2025.acl-short.9/) (Moon et al., ACL 2025)
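The abstract's point is methodological: a claim that one IT dataset is "better" than another should not rest on a single, arbitrarily chosen training configuration. Below is a minimal sketch of the reporting discipline this implies, assuming a shared hyperparameter grid for all datasets under comparison; `train_and_evaluate` is a hypothetical placeholder and the grid values are illustrative, not the paper's actual setup.

```python
from itertools import product

def train_and_evaluate(dataset: str, lr: float, epochs: int, batch_size: int) -> float:
    """Hypothetical placeholder: returns a deterministic dummy score.
    Replace with a real fine-tuning + alignment-evaluation pipeline."""
    return (lr * 1e4 + epochs * 0.01 + batch_size * 0.001 + len(dataset) * 0.1) % 1.0

# Illustrative grid values only; not the settings used in the paper.
GRID = {
    "lr": [1e-5, 2e-5, 5e-5],
    "epochs": [3, 5, 10],
    "batch_size": [32, 64, 128],
}

def sweep(dataset: str) -> list[float]:
    """Score one dataset under every hyperparameter combination in GRID."""
    return [
        train_and_evaluate(dataset, lr, ep, bs)
        for lr, ep, bs in product(GRID["lr"], GRID["epochs"], GRID["batch_size"])
    ]

for name in ("LIMA", "Alpaca-1k"):
    scores = sweep(name)
    # Report the spread rather than one arbitrarily chosen point: calling one
    # dataset "better" is only defensible if its scores dominate across the grid.
    print(f"{name}: min={min(scores):.3f}, max={max(scores):.3f}")
```

Reporting the min/max (or the full distribution) over a grid shared by all datasets makes a quality claim robust to the arbitrary hyperparameter choices the paper identifies as a confound.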