@inproceedings{jha-mamidi-2017-compliment,
  title     = {When does a compliment become sexist? Analysis and classification of ambivalent sexism using {Twitter} data},
  author    = {Jha, Akshita and
               Mamidi, Radhika},
  editor    = {Hovy, Dirk and
               Volkova, Svitlana and
               Bamman, David and
               Jurgens, David and
               O{'}Connor, Brendan and
               Tsur, Oren and
               Do{\u{g}}ru{\"o}z, A. Seza},
  booktitle = {Proceedings of the Second Workshop on {NLP} and Computational Social Science},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/W17-2902},
  doi       = {10.18653/v1/W17-2902},
  pages     = {7--16},
  abstract  = {Sexism is prevalent in today{'}s society, both offline and online, and poses a credible threat to social equality with respect to gender. According to ambivalent sexism theory (Glick and Fiske, 1996), it comes in two forms: Hostile and Benevolent. While hostile sexism is characterized by an explicitly negative attitude, benevolent sexism is more subtle. Previous works on computationally detecting sexism present online are restricted to identifying the hostile form. Our objective is to investigate the less pronounced form of sexism demonstrated online. We achieve this by creating and analyzing a dataset of tweets that exhibit benevolent sexism. By using Support Vector Machines (SVM), sequence-to-sequence models and FastText classifier, we classify tweets into {`}Hostile{'}, {`}Benevolent{'} or {`}Others{'} class depending on the kind of sexism they exhibit. We have been able to achieve an F1-score of 87.22{\%} using FastText classifier. Our work helps analyze and understand the much prevalent ambivalent sexism in social media.},
}
Markdown (Informal)
[When does a compliment become sexist? Analysis and classification of ambivalent sexism using Twitter data](https://aclanthology.org/W17-2902) (Jha & Mamidi, NLP+CSS 2017)
ACL