@inproceedings{ikram-etal-2025-exploring,
  title     = {Exploring {LLMs} for Predicting Tutor Strategy and Student Outcomes in Dialogues},
  author    = {Ikram, Fareya and
               Scarlatos, Alexander and
               Lan, Andrew},
  editor    = {Kochmar, Ekaterina and
               Alhafni, Bashar and
               Bexte, Marie and
               Burstein, Jill and
               Horbach, Andrea and
               Laarmann-Quante, Ronja and
               Tack, Ana{\"i}s and
               Yaneva, Victoria and
               Yuan, Zheng},
  booktitle = {Proceedings of the 20th Workshop on Innovative Use of {NLP} for Building Educational Applications ({BEA} 2025)},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.bea-1.55/},
  pages     = {765--779},
  isbn      = {979-8-89176-270-1},
  abstract  = {Tutoring dialogues have gained significant attention in recent years, given the prominence of online learning and the emerging tutoring abilities of artificial intelligence (AI) agents powered by large language models (LLMs). Recent studies have shown that the strategies used by tutors can have significant effects on student outcomes, necessitating methods to predict how tutors will behave and how their actions impact students. However, few works have studied predicting tutor strategy in dialogues. Therefore, in this work we investigate the ability of modern LLMs, particularly Llama 3 and GPT-4o, to predict both future tutor moves and student outcomes in dialogues, using two math tutoring dialogue datasets. We find that even state-of-the-art LLMs struggle to predict future tutor strategy while tutor strategy is highly indicative of student outcomes, outlining a need for more powerful methods to approach this task.},
}
Markdown (Informal)
[Exploring LLMs for Predicting Tutor Strategy and Student Outcomes in Dialogues](https://aclanthology.org/2025.bea-1.55/) (Ikram et al., BEA 2025)
ACL