@inproceedings{bexte-etal-2025-increasing,
  title     = {Increasing the Generalizability of Similarity-Based Essay Scoring Through Cross-Prompt Training},
  author    = {Bexte, Marie and
               Ding, Yuning and
               Horbach, Andrea},
  editor    = {Kochmar, Ekaterina and
               Alhafni, Bashar and
               Bexte, Marie and
               Burstein, Jill and
               Horbach, Andrea and
               Laarmann-Quante, Ronja and
               Tack, Ana{\"i}s and
               Yaneva, Victoria and
               Yuan, Zheng},
  booktitle = {Proceedings of the 20th Workshop on Innovative Use of {NLP} for Building Educational Applications ({BEA} 2025)},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.bea-1.17/},
  pages     = {225--236},
  isbn      = {979-8-89176-270-1},
  abstract  = {In this paper, we address generic essay scoring, i.e., the use of training data from one writing task to score data from a different task. We approach this by generalizing a similarity-based essay scoring method (Xie et al., 2022) to learning from texts that are written in response to a mixture of different prompts. In our experiments, we compare within-prompt and cross-prompt performance on two large datasets (ASAP and PERSUADE). We combine different amounts of prompts in the training data and show that our generalized method substantially improves cross-prompt performance, especially when an increasing number of prompts is used to form the training data. In the most extreme case, this leads to more than double the performance, increasing QWK from .26 to .55.},
}
Markdown (Informal)
[Increasing the Generalizability of Similarity-Based Essay Scoring Through Cross-Prompt Training](https://preview.aclanthology.org/acl25-workshop-ingestion/2025.bea-1.17/) (Bexte et al., BEA 2025)
ACL