@inproceedings{kim-etal-2025-blinded,
title = "Blinded by Context: Unveiling the Halo Effect of {MLLM} in {AI} Hiring",
author = "Kim, Kyusik and
Ryu, Jeongwoo and
Jeon, Hyeonseok and
Suh, Bongwon",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.findings-acl.1338/",
pages = "26067--26113",
ISBN = "979-8-89176-256-5",
abstract = "This study investigates the halo effect in AI-driven hiring evaluations using Large Language Models (LLMs) and Multimodal Large Language Models (MLLMs). Through experiments with hypothetical job applications, we examined how these models' evaluations are influenced by non-job-related information, including extracurricular activities and social media images. By analyzing models' responses to Likert-scale questions across different competency dimensions, we found that AI models exhibit significant halo effects, particularly in image-based evaluations, while text-based assessments showed more resistance to bias. The findings demonstrate that supplementary multimodal information can substantially influence AI hiring decisions, highlighting potential risks in AI-based recruitment systems."
}