@inproceedings{velugubantla-etal-2025-ai,
  title     = {{AI} Tools Can Generate Misculture Visuals! Detecting Prompts Generating Misculture Visuals For Prevention},
  author    = {Velugubantla, Venkatesh and
               Sonani, Raj and
               Sathvik, Msvpj},
  editor    = {Atwell, Katherine and
               Biester, Laura and
               Borah, Angana and
               Dementieva, Daryna and
               Ignat, Oana and
               Kotonya, Neema and
               Liu, Ziyi and
               Wan, Ruyuan and
               Wilson, Steven and
               Zhao, Jieyu},
  booktitle = {Proceedings of the Fourth Workshop on {NLP} for Positive Impact ({NLP4PI})},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.nlp4pi-1.25/},
  doi       = {10.18653/v1/2025.nlp4pi-1.25},
  pages     = {285--293},
  isbn      = {978-1-959429-19-7},
  abstract  = {Advanced AI models that generate realistic images from text prompts offer new creative possibilities but also risk producing culturally insensitive or offensive content. To address this issue, we introduce a novel dataset designed to classify text prompts that could lead to the generation of harmful images misrepresenting different cultures and communities. By training machine learning models on this dataset, we aim to automatically identify and filter out harmful prompts before image generation, balancing cultural sensitivity with creative freedom. Benchmarking with state-of-the-art language models, our baseline models achieved an accuracy of 73.34\%.},
}
@comment{
Markdown (Informal)
[AI Tools Can Generate Misculture Visuals! Detecting Prompts Generating Misculture Visuals For Prevention](https://aclanthology.org/2025.nlp4pi-1.25/) (Velugubantla et al., NLP4PI 2025)
ACL
}