@inproceedings{ghate-etal-2024-evaluating,
title = "Evaluating Gender Bias in Multilingual Multimodal {AI} Models: Insights from an {I}ndian Context",
author = "Ghate, Kshitish and
Choudhry, Arjun and
Bannihatti Kumar, Vanya",
editor = "Fale{\'n}ska, Agnieszka and
Basta, Christine and
Costa-juss{\`a}, Marta and
Goldfarb-Tarrant, Seraphina and
Nozza, Debora",
booktitle = "Proceedings of the 5th Workshop on Gender Bias in Natural Language Processing (GeBNLP)",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2024.gebnlp-1.21/",
doi = "10.18653/v1/2024.gebnlp-1.21",
pages = "338--350",
abstract = "We evaluate gender biases in multilingual multimodal image and text models in two settings: text-to-image retrieval and text-to-image generation, to show that even seemingly gender-neutral traits generate biased results. We evaluate our framework in the context of people from India, working with two languages: English and Hindi. We work with frameworks built around mCLIP-based models to ensure a thorough evaluation of recent state-of-the-art models in the multilingual setting due to their potential for widespread applications. We analyze the results across 50 traits for retrieval and 8 traits for generation, showing that current multilingual multimodal models are biased towards men for most traits, and this problem is further exacerbated for lower-resource languages like Hindi. We further discuss potential reasons behind this observation, particularly stemming from the bias introduced by the pretraining datasets."
}
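To make the retrieval setting described in the abstract concrete, here is a minimal sketch of measuring gender skew in multilingual text-to-image retrieval with an mCLIP-style model. This is not the authors' released evaluation framework: the `clip-ViT-B-32` / `clip-ViT-B-32-multilingual-v1` pairing from sentence-transformers is just one commonly available aligned image/text encoder pair, and the image corpus, its gender annotations, and the trait prompts below are illustrative placeholders.

```python
# Hedged sketch (not the paper's code): gender skew in multilingual
# text-to-image retrieval with an mCLIP-style encoder pair.
from PIL import Image
from sentence_transformers import SentenceTransformer, util

# CLIP image encoder and a multilingual text encoder aligned to the
# same embedding space (an mCLIP-style pairing).
img_model = SentenceTransformer("clip-ViT-B-32")
txt_model = SentenceTransformer("clip-ViT-B-32-multilingual-v1")

# Hypothetical annotated corpus: each image labeled "man" or "woman".
corpus = [
    ("img/0001.jpg", "man"),
    ("img/0002.jpg", "woman"),
    # ... a much larger annotated corpus in practice
]
images = [Image.open(path) for path, _ in corpus]
labels = [gender for _, gender in corpus]
img_emb = img_model.encode(images, convert_to_tensor=True)

def retrieval_skew(prompt: str, k: int = 100) -> float:
    """Fraction of the top-k retrieved images labeled 'man'.

    0.5 would indicate no gender skew for this prompt; values above
    0.5 indicate a skew towards men.
    """
    query = txt_model.encode(prompt, convert_to_tensor=True)
    scores = util.cos_sim(query, img_emb)[0]          # cosine sim to every image
    top = scores.topk(min(k, len(labels))).indices    # top-k retrieved images
    return sum(labels[int(i)] == "man" for i in top) / len(top)

# Seemingly gender-neutral trait prompts, queried in both languages
# studied in the paper (illustrative examples, not the paper's trait list).
for prompt in [
    "a photo of an intelligent person",   # English
    "एक बुद्धिमान व्यक्ति की तस्वीर",        # the same prompt in Hindi
]:
    print(f"{prompt}: {retrieval_skew(prompt):.2f}")
```

Comparing the skew for the English and Hindi versions of the same trait prompt is one way to probe the paper's finding that bias is exacerbated for lower-resource languages like Hindi.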