@inproceedings{na-lee-2020-jbnu,
title = "{JBNU} at {S}em{E}val-2020 Task 4: {BERT} and {U}ni{LM} for Commonsense Validation and Explanation",
author = "Na, Seung-Hoon and
Lee, Jong-Hyeon",
editor = "Herbelot, Aurelie and
Zhu, Xiaodan and
Palmer, Alexis and
Schneider, Nathan and
May, Jonathan and
Shutova, Ekaterina",
booktitle = "Proceedings of the Fourteenth Workshop on Semantic Evaluation",
month = dec,
year = "2020",
address = "Barcelona (online)",
publisher = "International Committee for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2020.semeval-1.65/",
doi = "10.18653/v1/2020.semeval-1.65",
pages = "527--534",
abstract = "This paper presents our contributions to the SemEval-2020 Task 4 Commonsense Validation and Explanation (ComVE) and includes the experimental results of the two Subtasks B and C of the SemEval-2020 Task 4. Our systems rely on pre-trained language models, i.e., BERT (including its variants) and UniLM, and rank 10th and 7th among 27 and 17 systems on Subtasks B and C, respectively. We analyze the commonsense ability of the existing pretrained language models by testing them on the SemEval-2020 Task 4 ComVE dataset, specifically for Subtasks B and C, the explanation subtasks with multi-choice and sentence generation, respectively."
}
Markdown (Informal)
[JBNU at SemEval-2020 Task 4: BERT and UniLM for Commonsense Validation and Explanation](https://aclanthology.org/2020.semeval-1.65/) (Na & Lee, SemEval 2020)