@inproceedings{han-etal-2023-fair,
title = "Fair Enough: Standardizing Evaluation and Model Selection for Fairness Research in {NLP}",
author = "Han, Xudong and
Baldwin, Timothy and
Cohn, Trevor",
editor = "Vlachos, Andreas and
Augenstein, Isabelle",
booktitle = "Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics",
month = may,
year = "2023",
address = "Dubrovnik, Croatia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.eacl-main.23/",
doi = "10.18653/v1/2023.eacl-main.23",
pages = "297--312",
abstract = "Modern NLP systems exhibit a range of biases, which a growing literature on model debiasing attempts to correct. However, current progress is hampered by a plurality of definitions of bias, means of quantification, and oftentimes vague relation between debiasing algorithms and theoretical measures of bias. This paper seeks to clarify the current situation and plot a course for meaningful progress in fair learning, with two key contributions: (1) making clear inter-relations among the current gamut of methods, and their relation to fairness theory; and (2) addressing the practical problem of model selection, which involves a trade-off between fairness and accuracy and has led to systemic issues in fairness research. Putting them together, we make several recommendations to help shape future work."
}