@inproceedings{zhou-etal-2024-leveraging,
title = "Leveraging Web-Crawled Data for High-Quality Fine-Tuning",
author = "Zhou, Jing and
Jiang, Chenglin and
Shen, Wei and
Zhou, Xiao and
He, Xiaonan",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2024.findings-emnlp.660/",
doi = "10.18653/v1/2024.findings-emnlp.660",
pages = "11297--11312",
abstract = "Most large language models are fine-tuned using either expensive human-annotated data or GPT-4 generated data which cannot guarantee performance in certain domains. We argue that although the web-crawled data often has formatting errors causing semantic inaccuracies, it can still serve as a valuable source for high-quality supervised fine-tuning in specific domains without relying on advanced models like GPT-4. To this end, we create a paired training dataset automatically by aligning web-crawled data with a smaller set of high-quality data. By training a language model on this dataset, we can convert web data with irregular formats into high-quality ones. Our experiments show that training with the model-transformed data yields better results, surpassing training with only high-quality data by an average score of 9.4{\%} in Chinese math problems. Additionally, our 7B model outperforms several open-source models larger than 32B and surpasses well-known closed-source models such as GPT-3.5, highlighting the efficacy of our approach. We have released our code at https://github.com/zhouj8553/Web{\_}to{\_}SFT."
}
Markdown (Informal)
[Leveraging Web-Crawled Data for High-Quality Fine-Tuning](https://aclanthology.org/2024.findings-emnlp.660/) (Zhou et al., Findings 2024)
ACL
Jing Zhou, Chenglin Jiang, Wei Shen, Xiao Zhou, and Xiaonan He. 2024. Leveraging Web-Crawled Data for High-Quality Fine-Tuning. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 11297–11312, Miami, Florida, USA. Association for Computational Linguistics.