@inproceedings{do-etal-2024-autoregressive,
  title     = {Autoregressive Score Generation for Multi-trait Essay Scoring},
  author    = {Do, Heejin and
               Kim, Yunsu and
               Lee, Gary},
  editor    = {Graham, Yvette and
               Purver, Matthew},
  booktitle = {Findings of the Association for Computational Linguistics: EACL 2024},
  month     = mar,
  year      = {2024},
  address   = {St. Julian{'}s, Malta},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.findings-eacl.115/},
  pages     = {1659--1666},
  abstract  = {Recently, encoder-only pre-trained models such as BERT have been successfully applied in automated essay scoring (AES) to predict a single overall score. However, studies have yet to explore these models in multi-trait AES, possibly due to the inefficiency of replicating BERT-based models for each trait. Breaking away from the existing sole use of \emph{encoder}, we propose an autoregressive prediction of multi-trait scores (ArTS), incorporating a \emph{decoding} process by leveraging the pre-trained T5. Unlike prior regression or classification methods, we redefine AES as a score-generation task, allowing a single model to predict multiple scores. During decoding, the subsequent trait prediction can benefit by conditioning on the preceding trait scores. Experimental results proved the efficacy of ArTS, showing over 5{\%} average improvements in both prompts and traits.},
}
Markdown (Informal)
[Autoregressive Score Generation for Multi-trait Essay Scoring](https://aclanthology.org/2024.findings-eacl.115/) (Do et al., Findings 2024)
ACL