@inproceedings{jeon-etal-2025-iterative,
    title     = {Iterative Prompt Refinement for Safer {Text-to-Image} Generation},
    author    = {Jeon, Jinwoo and
                 Oh, JunHyeok and
                 Lee, Hayeong and
                 Lee, Byung-Jun},
    editor    = {Christodoulopoulos, Christos and
                 Chakraborty, Tanmoy and
                 Rose, Carolyn and
                 Peng, Violet},
    booktitle = {Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing},
    month     = nov,
    year      = {2025},
    address   = {Suzhou, China},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.emnlp-main.913/},
    doi       = {10.18653/v1/2025.emnlp-main.913},
    pages     = {18091--18107},
    isbn      = {979-8-89176-332-6},
    abstract  = {Text-to-Image (T2I) models have made remarkable progress in generating images from text prompts, but their output quality and safety still depend heavily on how prompts are phrased. Existing safety methods typically refine prompts using large language models (LLMs), but they overlook the images produced, which can result in unsafe outputs or unnecessary changes to already safe prompts. To address this, we propose an iterative prompt refinement algorithm that uses Vision Language Models (VLMs) to analyze both the input prompts and the generated images. By leveraging visual feedback, our method refines prompts more effectively, improving safety while maintaining user intent and reliability comparable to existing LLM-based approaches. Additionally, we introduce a new dataset labeled with both textual and visual safety signals using off-the-shelf multi-modal LLM, enabling supervised fine-tuning. Experimental results demonstrate that our approach produces safer outputs without compromising alignment with user intent, offering a practical solution for generating safer T2I content. WARNING: This paper contains examples of harmful or inappropriate images generated by models.},
}
Markdown (Informal)
[Iterative Prompt Refinement for Safer Text-to-Image Generation](https://aclanthology.org/2025.emnlp-main.913/) (Jeon et al., EMNLP 2025)
ACL