@inproceedings{weissburg-etal-2025-llms,
title = "{LLM}s are Biased Teachers: Evaluating {LLM} Bias in Personalized Education",
author = "Weissburg, Iain and
Anand, Sathvika and
Levy, Sharon and
Jeong, Haewon",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.findings-naacl.314/",
pages = "5650--5698",
ISBN = "979-8-89176-195-7",
abstract = "With the increasing adoption of large language models (LLMs) in education, concerns about inherent biases in these models have gained prominence. We evaluate LLMs for bias in the personalized educational setting, specifically focusing on the models' roles as ``teachers.'' We reveal significant biases in how models generate and select educational content tailored to different demographic groups, including race, ethnicity, sex, gender, disability status, income, and national origin. We introduce and apply two bias score metrics{---}Mean Absolute Bias (MAB) and Maximum Difference Bias (MDB){---}to analyze 9 open and closed state-of-the-art LLMs. Our experiments, which utilize over 17,000 educational explanations across multiple difficulty levels and topics, uncover that models potentially harm student learning by both perpetuating harmful stereotypes and reversing them. We find that bias is similar for all frontier models, with the highest MAB along income levels while MDB is highest relative to both income and disability status. For both metrics, we find the lowest bias exists for sex/gender and race/ethnicity."
}
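
The abstract introduces two bias metrics, Mean Absolute Bias (MAB) and Maximum Difference Bias (MDB), without spelling out their formulas. Below is a minimal Python sketch of how such group-level bias scores might be computed, assuming MAB is the mean absolute deviation of per-group scores from the cross-group mean and MDB is the largest gap between any two groups; the paper's formal definitions may differ, and every name and value here is illustrative rather than taken from the paper.

    import numpy as np

    # Hypothetical per-group quality scores for one generated explanation
    # (e.g., averaged readability or helpfulness per demographic group).
    # Group labels and values are illustrative only.
    group_scores = {
        "group_a": 0.82,
        "group_b": 0.74,
        "group_c": 0.79,
    }

    scores = np.array(list(group_scores.values()))

    # Mean Absolute Bias (MAB), assumed here to be the mean absolute
    # deviation of each group's score from the cross-group mean.
    mab = np.mean(np.abs(scores - scores.mean()))

    # Maximum Difference Bias (MDB), assumed here to be the largest
    # gap between any two groups' scores.
    mdb = scores.max() - scores.min()

    print(f"MAB = {mab:.3f}, MDB = {mdb:.3f}")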