@inproceedings{hu-lee-2026-hatexscore,
    title = "{H}ate{XS}core: A Metric Suite for Evaluating Reasoning Quality in Hate Speech Explanations",
    author = "Hu, Yujia and
      Lee, Roy Ka-Wei",
    editor = "Demberg, Vera and
      Inui, Kentaro and
      Marquez, Llu{\'i}s",
    booktitle = "Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 1: Long Papers)",
    month = mar,
    year = "2026",
    address = "Rabat, Morocco",
    publisher = "Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.198/",
    pages = "4221--4240",
    isbn = "979-8-89176-380-7",
    abstract = "Hateful speech detection is a key component of content moderation, yet current evaluation frameworks rarely assess why a text is deemed hateful. We introduce HateXScore, a four-component metric suite designed to evaluate the reasoning quality of model explanations. It assesses (i) conclusion explicitness, (ii) faithfulness and causal grounding of quoted spans, (iii) protected group identification (policy-configurable), and (iv) logical consistency among these elements. Evaluated on six diverse hate speech datasets, HateXScore reveals interpretability failures and annotation inconsistencies that are invisible to standard metrics like Accuracy or F1. Moreover, human evaluation shows strong agreement with HateXScore, validating it as a practical tool for trustworthy and transparent moderation. Disclaimer: This paper contains sensitive content that may be disturbing to some readers."
}
Markdown (Informal)
[HateXScore: A Metric Suite for Evaluating Reasoning Quality in Hate Speech Explanations](https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.198/) (Hu & Lee, EACL 2026)
ACL Anthology