@inproceedings{achara-chhabra-2025-watching,
    title     = "Watching the {AI} Watchdogs: A Fairness and Robustness Analysis of {AI} Safety Moderation Classifiers",
    author    = "Achara, Akshit and
      Chhabra, Anshuman",
    editor    = "Chiruzzo, Luis and
      Ritter, Alan and
      Wang, Lu",
    booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers)",
    month     = apr,
    year      = "2025",
    address   = "Albuquerque, New Mexico",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2025.naacl-short.22/",
    pages     = "253--264",
    isbn      = "979-8-89176-190-2",
    abstract  = "AI Safety Moderation (ASM) classifiers are designed to moderate content on social media platforms and to serve as guardrails that prevent Large Language Models (LLMs) from being fine-tuned on unsafe inputs. Owing to their potential for disparate impact, it is crucial to ensure that these classifiers: (1) do not unfairly classify content belonging to users from minority groups as unsafe compared to those from majority groups and (2) that their behavior remains robust and consistent across similar inputs. In this work, we thus examine the fairness and robustness of four widely-used, closed-source ASM classifiers: OpenAI Moderation API, Perspective API, Google Cloud Natural Language (GCNL) API, and Clarifai API. We assess fairness using metrics such as demographic parity and conditional statistical parity, comparing their performance against ASM models and a fair-only baseline. Additionally, we analyze robustness by testing the classifiers' sensitivity to small and natural input perturbations. Our findings reveal potential fairness and robustness gaps, highlighting the need to mitigate these issues in future versions of these models."
}
Markdown (Informal)
[Watching the AI Watchdogs: A Fairness and Robustness Analysis of AI Safety Moderation Classifiers](https://aclanthology.org/2025.naacl-short.22/) (Achara & Chhabra, NAACL 2025)
ACL