@inproceedings{han-tsvetkov-2020-fortifying,
title = "Fortifying Toxic Speech Detectors Against Veiled Toxicity",
author = "Han, Xiaochuang and
Tsvetkov, Yulia",
editor = "Webber, Bonnie and
Cohn, Trevor and
He, Yulan and
Liu, Yang",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Author-page-Marten-During-lu/2020.emnlp-main.622/",
doi = "10.18653/v1/2020.emnlp-main.622",
pages = "7732--7739",
abstract = "Modern toxic speech detectors are incompetent in recognizing disguised offensive language, such as adversarial attacks that deliberately avoid known toxic lexicons, or manifestations of implicit bias. Building a large annotated dataset for such veiled toxicity can be very expensive. In this work, we propose a framework aimed at fortifying existing toxic speech detectors without a large labeled corpus of veiled toxicity. Just a handful of probing examples are used to surface orders of magnitude more disguised offenses. We augment the toxic speech detector`s training data with these discovered offensive examples, thereby making it more robust to veiled toxicity while preserving its utility in detecting overt toxicity."
}
Markdown (Informal)
[Fortifying Toxic Speech Detectors Against Veiled Toxicity](https://aclanthology.org/2020.emnlp-main.622/) (Han & Tsvetkov, EMNLP 2020)
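The abstract sketches the framework at a high level: a handful of labeled probing examples surface many more disguised offenses from unlabeled data, which then augment the detector's training set. Below is a minimal, illustrative sketch of that augmentation loop, not the paper's actual method (the paper selects candidates using influence functions over a trained detector); here a plain TF-IDF nearest-neighbour retrieval stands in for the selection step, and all example strings (`probing_examples`, `unlabeled_pool`) are hypothetical.

```python
# Illustrative sketch only: TF-IDF similarity stands in for the paper's
# influence-function-based selection of veiled-toxicity candidates.
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Hypothetical data: a handful of probing examples of veiled toxicity,
# plus a large unlabeled pool to mine for similar disguised offenses.
probing_examples = [
    "you people always ruin everything",
    "go back to where you came from",
]
unlabeled_pool = [
    "what a lovely day for a walk",
    "people like you never belong here",
    "the meeting is rescheduled to friday",
    "folks of your kind should just leave",
]

vectorizer = TfidfVectorizer()
pool_vecs = vectorizer.fit_transform(unlabeled_pool)
probe_vecs = vectorizer.transform(probing_examples)

# Score each unlabeled example by its maximum similarity to any probe,
# then keep the top-k candidates as likely disguised offenses.
scores = cosine_similarity(pool_vecs, probe_vecs).max(axis=1)
k = 2
top_k = np.argsort(scores)[::-1][:k]
discovered = [unlabeled_pool[i] for i in top_k]

# Augment the detector's training data with the discovered examples,
# labeled toxic (1), then retrain the detector (retraining omitted here).
augmented_training_data = [(text, 1) for text in discovered]
print(augmented_training_data)
```

The key design point the abstract emphasizes survives even in this toy version: only a few probing examples are labeled by hand, and the selection step does the work of scaling that signal up over a much larger unlabeled corpus.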