@inproceedings{mendelsohn-etal-2023-dogwhistles,
    title = "From Dogwhistles to Bullhorns: Unveiling Coded Rhetoric with Language Models",
    author = "Mendelsohn, Julia and
Le Bras, Ronan and
Choi, Yejin and
Sap, Maarten",
    editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
    booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.acl-long.845/",
    doi = "10.18653/v1/2023.acl-long.845",
    pages = "15162--15180",
    abstract = "Dogwhistles are coded expressions that simultaneously convey one meaning to a broad audience and a second, often hateful or provocative, meaning to a narrow in-group; they are deployed to evade both political repercussions and algorithmic content moderation. For example, the word ``cosmopolitan'' in a sentence such as ``we need to end the cosmopolitan experiment'' can mean ``worldly'' to many but also secretly mean ``Jewish'' to a select few. We present the first large-scale computational investigation of dogwhistles. We develop a typology of dogwhistles, curate the largest-to-date glossary of over 300 dogwhistles with rich contextual information and examples, and analyze their usage in historical U.S. politicians' speeches. We then assess whether a large language model (GPT-3) can identify dogwhistles and their meanings, and find that GPT-3{'}s performance varies widely across types of dogwhistles and targeted groups. Finally, we show that harmful content containing dogwhistles avoids toxicity detection, highlighting online risks presented by such coded language. This work sheds light on the theoretical and applied importance of dogwhistles in both NLP and computational social science, and provides resources to facilitate future research in modeling dogwhistles and mitigating their online harms."
}
@comment{Markdown (Informal):
[From Dogwhistles to Bullhorns: Unveiling Coded Rhetoric with Language Models](https://aclanthology.org/2023.acl-long.845/) (Mendelsohn et al., ACL 2023)
ACL
}