@inproceedings{petukhova-kochmar-2025-intent,
  title     = {Intent Matters: Enhancing {AI} Tutoring with Fine-Grained Pedagogical Intent Annotation},
  author    = {Petukhova, Kseniia and
               Kochmar, Ekaterina},
  editor    = {Kochmar, Ekaterina and
               Alhafni, Bashar and
               Bexte, Marie and
               Burstein, Jill and
               Horbach, Andrea and
               Laarmann-Quante, Ronja and
               Tack, Ana{\"i}s and
               Yaneva, Victoria and
               Yuan, Zheng},
  booktitle = {Proceedings of the 20th Workshop on Innovative Use of {NLP} for Building Educational Applications ({BEA} 2025)},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.bea-1.63/},
  pages     = {860--872},
  isbn      = {979-8-89176-270-1},
  abstract  = {Large language models (LLMs) hold great promise for educational applications, particularly in intelligent tutoring systems. However, effective tutoring requires alignment with pedagogical strategies {--} something current LLMs lack without task-specific adaptation. In this work, we explore whether fine-grained annotation of teacher intents can improve the quality of LLM-generated tutoring responses. We focus on MathDial, a dialog dataset for math instruction, and apply an automated annotation framework to re-annotate a portion of the dataset using a detailed taxonomy of eleven pedagogical intents. We then fine-tune an LLM using these new annotations and compare its performance to models trained on the original four-category taxonomy. Both automatic and qualitative evaluations show that the fine-grained model produces more pedagogically aligned and effective responses. Our findings highlight the value of intent specificity for controlled text generation in educational settings, and we release our annotated data and code to facilitate further research.},
}
@comment{
Markdown (Informal)
[Intent Matters: Enhancing AI Tutoring with Fine-Grained Pedagogical Intent Annotation](https://aclanthology.org/2025.bea-1.63/) (Petukhova & Kochmar, BEA 2025)
ACL
}