@inproceedings{bexte-etal-2022-similarity,
title = "Similarity-Based Content Scoring - How to Make {S}-{BERT} Keep Up With {BERT}",
author = "Bexte, Marie and
Horbach, Andrea and
Zesch, Torsten",
editor = {Kochmar, Ekaterina and
Burstein, Jill and
Horbach, Andrea and
Laarmann-Quante, Ronja and
Madnani, Nitin and
Tack, Ana{\"i}s and
Yaneva, Victoria and
Yuan, Zheng and
Zesch, Torsten},
booktitle = "Proceedings of the 17th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2022)",
month = jul,
year = "2022",
address = "Seattle, Washington",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.bea-1.16/",
doi = "10.18653/v1/2022.bea-1.16",
pages = "118--123",
abstract = "The dominating paradigm for content scoring is to learn an instance-based model, i.e., to use lexical features derived from the learner answers themselves. An alternative approach, which receives much less attention, is to learn a similarity-based model. We introduce an architecture that efficiently learns a similarity model and find that results on the standard ASAP dataset are on par with a BERT-based classification approach."
}