@inproceedings{wang-etal-2022-use,
title = "On the Use of Bert for Automated Essay Scoring: Joint Learning of Multi-Scale Essay Representation",
author = "Wang, Yongjie and
Wang, Chuang and
Li, Ruobing and
Lin, Hui",
editor = "Carpuat, Marine and
de Marneffe, Marie-Catherine and
Meza Ruiz, Ivan Vladimir",
booktitle = "Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.naacl-main.249/",
doi = "10.18653/v1/2022.naacl-main.249",
pages = "3416--3425",
abstract = "In recent years, pre-trained models have become dominant in most natural language processing (NLP) tasks. However, in the area of Automated Essay Scoring (AES), pre-trained models such as BERT have not been properly used to outperform other deep learning models such as LSTM. In this paper, we introduce a novel multi-scale essay representation for BERT that can be jointly learned. We also employ multiple losses and transfer learning from out-of-domain essays to further improve the performance. Experiment results show that our approach derives much benefit from joint learning of multi-scale essay representation and obtains almost the state-of-the-art result among all deep learning models in the ASAP task. Our multi-scale essay representation also generalizes well to CommonLit Readability Prize data set, which suggests that the novel text representation proposed in this paper may be a new and effective choice for long-text tasks."
}
Markdown (Informal)
[On the Use of BERT for Automated Essay Scoring: Joint Learning of Multi-Scale Essay Representation](https://aclanthology.org/2022.naacl-main.249/) (Wang et al., NAACL 2022)
ACL
Yongjie Wang, Chuang Wang, Ruobing Li, and Hui Lin. 2022. [On the Use of BERT for Automated Essay Scoring: Joint Learning of Multi-Scale Essay Representation](https://aclanthology.org/2022.naacl-main.249/). In *Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies*, pages 3416–3425, Seattle, United States. Association for Computational Linguistics.
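For orientation, here is a minimal PyTorch sketch of what a "multi-scale essay representation" trained with multiple losses could look like. It is an assumption-laden illustration of the idea named in the abstract, not the authors' method: the model name, segment length, pooling scheme, regressor, and the specific loss pair (MSE plus a margin ranking loss) are all hypothetical choices made for the example.

```python
# Illustrative sketch only -- not the paper's released implementation.
# Shows one plausible reading of jointly learning a document-scale and a
# segment-scale BERT representation of an essay, scored by regression.
import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizerFast


class MultiScaleEssayScorer(nn.Module):
    def __init__(self, model_name="bert-base-uncased", segment_len=128, hidden=768):
        super().__init__()
        self.bert = BertModel.from_pretrained(model_name)
        self.segment_len = segment_len
        # Regressor over concatenated document-scale and segment-scale features.
        self.regressor = nn.Linear(2 * hidden, 1)

    def forward(self, input_ids, attention_mask):
        # Document scale: pooled [CLS] vector of the (possibly truncated) essay.
        doc_vec = self.bert(input_ids=input_ids,
                            attention_mask=attention_mask).pooler_output

        # Segment scale: encode fixed-length chunks separately and mean-pool
        # their pooled vectors, so information beyond a single window survives.
        seg_vecs = []
        for start in range(0, input_ids.size(1), self.segment_len):
            ids = input_ids[:, start:start + self.segment_len]
            mask = attention_mask[:, start:start + self.segment_len]
            seg_vecs.append(self.bert(input_ids=ids,
                                      attention_mask=mask).pooler_output)
        seg_vec = torch.stack(seg_vecs, dim=0).mean(dim=0)

        # Both scales feed one regressor, so they are learned jointly.
        return self.regressor(torch.cat([doc_vec, seg_vec], dim=-1)).squeeze(-1)


def multi_loss(pred, gold):
    """Example of combining multiple losses: pointwise MSE plus a pairwise
    margin ranking loss over in-batch pairs (ties incur a small constant
    penalty, which is acceptable for a sketch)."""
    mse = nn.functional.mse_loss(pred, gold)
    sign = torch.sign(gold - gold.roll(1))
    rank = nn.functional.margin_ranking_loss(pred, pred.roll(1), sign, margin=0.1)
    return mse + rank


# Usage example with hypothetical inputs.
tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
batch = tok(["An example essay ...", "Another example essay ..."],
            padding=True, truncation=True, max_length=512, return_tensors="pt")
model = MultiScaleEssayScorer()
scores = model(batch["input_ids"], batch["attention_mask"])
loss = multi_loss(scores, torch.tensor([0.8, 0.3]))
```

Under these assumptions, transfer learning from out-of-domain essays would amount to pretraining this scorer on essays from other prompts before fine-tuning on the target prompt; the paper should be consulted for the actual architecture and loss formulation.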