@inproceedings{liu-yu-2019-blcu,
  title     = {{BLCU}-{NLP} at {COIN}-Shared Task1: Stagewise Fine-tuning {BERT} for Commonsense Inference in Everyday Narrations},
  author    = {Liu, Chunhua and
               Yu, Dong},
  editor    = {Ostermann, Simon and
               Zhang, Sheng and
               Roth, Michael and
               Clark, Peter},
  booktitle = {Proceedings of the First Workshop on Commonsense Inference in Natural Language Processing},
  month     = nov,
  year      = {2019},
  address   = {Hong Kong, China},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/D19-6012/},
  doi       = {10.18653/v1/D19-6012},
  pages     = {99--103},
  abstract  = {This paper describes our system for COIN Shared Task 1: Commonsense Inference in Everyday Narrations. To inject more external knowledge to better reason over the narrative passage, question and answer, the system adopts a stagewise fine-tuning method based on pre-trained BERT model. More specifically, the first stage is to fine-tune on additional machine reading comprehension dataset to learn more commonsense knowledge. The second stage is to fine-tune on target-task (MCScript2.0) with MCScript (2018) dataset assisted. Experimental results show that our system achieves significant improvements over the baseline systems with 84.2{\%} accuracy on the official test dataset.},
}
Markdown (Informal)
[BLCU-NLP at COIN-Shared Task1: Stagewise Fine-tuning BERT for Commonsense Inference in Everyday Narrations](https://aclanthology.org/D19-6012/) (Liu & Yu, 2019)
ACL