@inproceedings{liu-etal-2020-lmve,
  title     = {{LMVE} at {SemEval}-2020 Task 4: Commonsense Validation and Explanation Using Pretraining Language Model},
  author    = {Liu, Shilei and
               Guo, Yu and
               Li, BoChao and
               Ren, Feiliang},
  editor    = {Herbelot, Aurelie and
               Zhu, Xiaodan and
               Palmer, Alexis and
               Schneider, Nathan and
               May, Jonathan and
               Shutova, Ekaterina},
  booktitle = {Proceedings of the Fourteenth Workshop on Semantic Evaluation},
  month     = dec,
  year      = {2020},
  address   = {Barcelona (online)},
  publisher = {International Committee for Computational Linguistics},
  url       = {https://preview.aclanthology.org/Ingest-2025-COMPUTEL/2020.semeval-1.70/},
  doi       = {10.18653/v1/2020.semeval-1.70},
  pages     = {562--568},
  abstract  = {This paper introduces our system for commonsense validation and explanation. For Sen-Making task, we use a novel pretraining language model based architecture to pick out one of the two given statements that is against common sense. For Explanation task, we use a hint sentence mechanism to improve the performance greatly. In addition, we propose a subtask level transfer learning to share information between subtasks.},
}
@comment{
Markdown (Informal)
[LMVE at SemEval-2020 Task 4: Commonsense Validation and Explanation Using Pretraining Language Model](https://preview.aclanthology.org/Ingest-2025-COMPUTEL/2020.semeval-1.70/) (Liu et al., SemEval 2020)
ACL
}