@inproceedings{hitti-etal-2019-proposed,
title = "Proposed Taxonomy for Gender Bias in Text; A Filtering Methodology for the Gender Generalization Subtype",
author = "Hitti, Yasmeen and
Jang, Eunbee and
Moreno, Ines and
Pelletier, Carolyne",
editor = "Costa-juss{\`a}, Marta R. and
Hardmeier, Christian and
Radford, Will and
Webster, Kellie",
booktitle = "Proceedings of the First Workshop on Gender Bias in Natural Language Processing",
month = aug,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W19-3802",
doi = "10.18653/v1/W19-3802",
pages = "8--17",
abstract = "The purpose of this paper is to present an empirical study on gender bias in text. Current research in this field is focused on detecting and correcting for gender bias in existing machine learning models rather than approaching the issue at the dataset level. The underlying motivation is to create a dataset which could enable machines to learn to differentiate bias writing from non-bias writing. A taxonomy is proposed for structural and contextual gender biases which can manifest themselves in text. A methodology is proposed to fetch one type of structural gender bias, Gender Generalization. We explore the IMDB movie review dataset and 9 different corpora from Project Gutenberg. By filtering out irrelevant sentences, the remaining pool of candidate sentences are sent for human validation. A total of 6123 judgments are made on 1627 sentences and after a quality check on randomly selected sentences we obtain an accuracy of 75{\%}. Out of the 1627 sentences, 808 sentence were labeled as Gender Generalizations. The inter-rater reliability amongst labelers was of 61.14{\%}.",
}