@inproceedings{zhou-etal-2025-high,
title = "A High-Quality Text-Rich Image Instruction Tuning Dataset via Hybrid Instruction Generation",
author = "Zhou, Shijie and
Zhang, Ruiyi and
Zhou, Yufan and
Chen, Changyou",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Add-Cong-Liu-Florida-Atlantic-University-author-id/2025.coling-main.674/",
pages = "10091--10110",
abstract = "Large multimodal models still struggle with text-rich images because of inadequate training data. Self-Instruct provides an annotation-free way for generating instruction data, but its quality is poor, as multimodal alignment remains a hurdle even for the largest models. In this work, we propose LLaVAR-2, to enhance multimodal alignment for text-rich images through hybrid instruction generation between human annotators and large language models. Specifically, it involves detailed image captions from human annotators, followed by the use of these annotations in tailored text prompts for GPT-4o to curate a dataset. It also implements several mechanisms to filter out low-quality data, and the resulting dataset comprises 424k high-quality pairs of instructions. Empirical results show that models fine-tuned on this dataset exhibit impressive enhancements over those trained with self-instruct data."
}
Markdown (Informal)
[A High-Quality Text-Rich Image Instruction Tuning Dataset via Hybrid Instruction Generation](https://aclanthology.org/2025.coling-main.674/) (Zhou et al., COLING 2025)