@inproceedings{fanton-roth-2024-shortcuts,
  title     = {On Shortcuts and Biases: How Finetuned Language Models Distinguish Audience-Specific Instructions in {Italian} and {English}},
  author    = {Fanton, Nicola and
               Roth, Michael},
  editor    = {Fale{\'n}ska, Agnieszka and
               Basta, Christine and
               Costa-juss{\`a}, Marta and
               Goldfarb-Tarrant, Seraphina and
               Nozza, Debora},
  booktitle = {Proceedings of the 5th Workshop on Gender Bias in Natural Language Processing ({GeBNLP})},
  month     = aug,
  year      = {2024},
  address   = {Bangkok, Thailand},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.gebnlp-1.6/},
  doi       = {10.18653/v1/2024.gebnlp-1.6},
  pages     = {78--93},
  abstract  = {Instructional texts for different audience groups can help to address specific needs, but at the same time run the risk of perpetrating biases. In this paper, we extend previous findings on disparate social norms and subtle stereotypes in wikiHow in two directions: We explore the use of fine-tuned language models to determine how audience-specific instructional texts can be distinguished and we transfer the methodology to another language, Italian, to identify cross-linguistic patterns. We find that language models mostly rely on group terms, gender markings, and attributes reinforcing stereotypes.},
}
Markdown (Informal)
[On Shortcuts and Biases: How Finetuned Language Models Distinguish Audience-Specific Instructions in Italian and English](https://aclanthology.org/2024.gebnlp-1.6/) (Fanton & Roth, GeBNLP 2024)
ACL