@inproceedings{elaraby-litman-2025-lessons,
title = "Lessons Learned in Assessing Student Reflections with {LLM}s",
author = "Elaraby, Mohamed and
Litman, Diane",
editor = {Kochmar, Ekaterina and
Alhafni, Bashar and
Bexte, Marie and
Burstein, Jill and
Horbach, Andrea and
Laarmann-Quante, Ronja and
Tack, Ana{\"i}s and
Yaneva, Victoria and
Yuan, Zheng},
booktitle = "Proceedings of the 20th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/acl25-workshop-ingestion/2025.bea-1.48/",
pages = "672--686",
ISBN = "979-8-89176-270-1",
abstract = "Advances in Large Language Models (LLMs) have sparked growing interest in their potential as explainable text evaluators. While LLMs have shown promise in assessing machine-generated texts in tasks such as summarization and machine translation, their effectiveness in evaluating human-written content{---}such as student writing in classroom settings{---}remains underexplored. In this paper, we investigate LLM-based specificity assessment of student reflections written in response to prompts, using three instruction-tuned models. Our findings indicate that although LLMs may underperform compared to simpler supervised baselines in terms of scoring accuracy, they offer a valuable interpretability advantage. Specifically, LLMs can generate user-friendly explanations that enhance the transparency and usability of automated specificity scoring systems."
}