@inproceedings{wu-etal-2025-evaluating,
title = "Evaluating Fairness in Large Vision-Language Models Across Diverse Demographic Attributes and Prompts",
author = "Wu, Xuyang and
Wang, Yuan and
Wu, Hsin-Tai and
Tao, Zhiqiang and
Fang, Yi",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.1251/",
doi = "10.18653/v1/2025.findings-emnlp.1251",
pages = "22973--22991",
ISBN = "979-8-89176-335-7",
abstract = "Large vision-language models (LVLMs) have recently achieved significant progress, demonstrating strong capabilities in open-world visual understanding. However, it is not yet clear how LVLMs address demographic biases in real life, especially the disparities across attributes such as gender, skin tone, age and race. In this paper, We empirically investigate visual fairness in several mainstream LVLMs by auditing their performance disparities across demographic attributes using public fairness benchmark datasets (e.g., FACET, UTKFace). Our fairness evaluation framework employs direct and single-choice question prompt on visual question-answering/classification tasks. Despite advancements in visual understanding, our zero-shot prompting results show that both open-source and closed-source LVLMs continue to exhibit fairness issues across different prompts and demographic groups. Furthermore, we propose a potential multi-modal Chain-of-thought (CoT) based strategy for unfairness mitigation, applicable to both open-source and closed-source LVLMs. This approach enhances transparency and offers a scalable solution for addressing fairness, providing a solid foundation for future research and practical efforts in unfairness mitigation. The dataset and code used in this study are publicly available at this GitHub Repository."
}
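The abstract names the two prompt styles (direct and single-choice) and the CoT-based mitigation only at a high level. As a rough illustration, here is a minimal Python sketch of what such a per-group audit loop could look like. Everything in it is an assumption: `query_lvlm` is a hypothetical stand-in for whichever open- or closed-source LVLM endpoint is being audited, the prompt wording is invented, and the per-group accuracy tally is just one plausible disparity measure; the paper's actual prompts and metrics are in its GitHub repository.

```python
# Hypothetical sketch of a zero-shot fairness audit over an LVLM, in the
# spirit of the paper's direct / single-choice prompting setup. Not the
# authors' code; prompt wording and helper names are illustrative only.

import string
from collections import defaultdict


def direct_prompt(question: str) -> str:
    # Direct question prompt: the model answers in free form.
    return f"Question: {question}\nAnswer:"


def single_choice_prompt(question: str, options: list[str]) -> str:
    # Single-choice prompt: the model must pick exactly one lettered option.
    lines = [f"({string.ascii_uppercase[i]}) {opt}" for i, opt in enumerate(options)]
    return f"Question: {question}\n" + "\n".join(lines) + "\nAnswer with one letter:"


def cot_prompt(question: str, options: list[str]) -> str:
    # One plausible CoT-style mitigation: ask the model to lay out the visual
    # evidence before committing to an answer, making its rationale auditable.
    return (
        single_choice_prompt(question, options)
        + "\nFirst describe the relevant visual evidence step by step, "
        "then give the final letter."
    )


def audit(samples, query_lvlm):
    """Tally per-group accuracy; gaps across groups signal potential unfairness.

    `samples` is an iterable of dicts with keys: image, question, options,
    label (the correct option letter), and group (e.g., a FACET skin-tone
    bucket or a UTKFace age bucket). `query_lvlm(image, prompt) -> str` is a
    hypothetical model call.
    """
    correct, total = defaultdict(int), defaultdict(int)
    for s in samples:
        pred = query_lvlm(s["image"], single_choice_prompt(s["question"], s["options"]))
        total[s["group"]] += 1
        correct[s["group"]] += int(pred.strip().startswith(s["label"]))
    return {group: correct[group] / total[group] for group in total}
```

Swapping `single_choice_prompt` for `cot_prompt` inside `audit` would reproduce the before/after comparison the abstract's mitigation claim implies, with the accuracy gap across groups serving as the disparity signal.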