@inproceedings{li-etal-2025-aide,
title = "{AIDE}: Attribute-Guided {M}ult{I}-Hop Data Expansion for Data Scarcity in Task-Specific Fine-tuning",
author = "Li, Jiayu and
Zhu, Jennifer and
Liu, Fang and
Qi, Yanjun",
editor = "Rehm, Georg and
Li, Yunyao",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 6: Industry Track)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-industry.77/",
pages = "1083--1101",
ISBN = "979-8-89176-288-6",
    abstract = "Fine-tuning large language models (LLMs) for specific tasks requires diverse, high-quality training data. However, obtaining sufficient relevant data remains a significant challenge. Existing data synthesis methods either depend on extensive seed datasets or struggle to balance task relevance and data diversity. To address these challenges, we propose Attribute-guided multI-hop Data Expansion (AIDE), a novel data synthesis framework that uses a multi-hop process to expand very few seed data points while ensuring data diversity and task relevance. AIDE extracts the main topic and key knowledge attributes from the seeds to guide the synthesis steps. The process repeats for K hops, using the generated data as seeds. To prevent irrelevant data generation as the hop depth increases, AIDE incorporates a residual connection mechanism. Our empirical results show that AIDE enables fine-tuning of Mistral-7B, Llama-3.1-8B, and Llama-3.2-3B from 10 seeds, surpassing the models fine-tuned on human-curated data. Furthermore, AIDE outperforms state-of-the-art data synthesis methods, such as Evol-Instruct, by over 30{\%} in task-specific fine-tuning. Code is available at https://github.com/Code4Graph/AIDE."
}
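Since the abstract compresses the method into a few sentences, the loop it describes can be pictured roughly as: seeds → attribute extraction → synthesis, repeated for K hops with generated data fed back as seeds, plus a residual connection to the original seeds. The Python sketch below is a hypothetical illustration under those assumptions only; the function names and toy string logic are mine, not the paper's, in AIDE each step is an LLM call, and "mixing the original seeds back into each hop's inputs" is just one plausible reading of the residual connection. See https://github.com/Code4Graph/AIDE for the actual implementation.

```python
"""Minimal sketch of the multi-hop expansion loop described in the AIDE
abstract. Function bodies are hypothetical stand-ins, not the authors'
code: in the paper each step is an LLM prompt."""


def extract_attributes(example: str) -> dict:
    # Hypothetical placeholder: AIDE prompts an LLM to extract the main
    # topic and key knowledge attributes from a seed example.
    words = example.split()
    return {"topic": words[0] if words else "", "source": example}


def synthesize(attributes: dict) -> str:
    # Hypothetical placeholder: AIDE prompts an LLM to write a new
    # training example guided by the extracted attributes.
    return f"[new example on topic: {attributes['topic']}]"


def aide_expand(seeds: list[str], k_hops: int) -> list[str]:
    """Expand a handful of seeds over K hops, reusing generated data as
    seeds for the next hop."""
    generated: list[str] = []
    frontier = list(seeds)
    for _ in range(k_hops):
        new_examples = [synthesize(extract_attributes(ex)) for ex in frontier]
        generated.extend(new_examples)
        # Residual connection (per the abstract, interpretation assumed):
        # mix the original seeds back into the next hop's inputs so deeper
        # hops stay on-task instead of drifting toward irrelevant data.
        frontier = new_examples + list(seeds)
    return generated


if __name__ == "__main__":
    print(aide_expand(["photosynthesis converts light to energy"], k_hops=2))
```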
Markdown (Informal)
[AIDE: Attribute-Guided MultI-Hop Data Expansion for Data Scarcity in Task-Specific Fine-tuning](https://preview.aclanthology.org/ingestion-acl-25/2025.acl-industry.77/) (Li et al., ACL 2025)
ACL
Jiayu Li, Jennifer Zhu, Fang Liu, and Yanjun Qi. 2025. AIDE: Attribute-Guided MultI-Hop Data Expansion for Data Scarcity in Task-Specific Fine-tuning. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 6: Industry Track), pages 1083–1101, Vienna, Austria. Association for Computational Linguistics.