@inproceedings{da-kasai-2019-cracking,
title = "Cracking the Contextual Commonsense Code: Understanding Commonsense Reasoning Aptitude of Deep Contextual Representations",
author = "Da, Jeff and
Kasai, Jungo",
editor = "Ostermann, Simon and
Zhang, Sheng and
Roth, Michael and
Clark, Peter",
booktitle = "Proceedings of the First Workshop on Commonsense Inference in Natural Language Processing",
month = nov,
year = "2019",
address = "Hong Kong, China",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D19-6001/",
doi = "10.18653/v1/D19-6001",
pages = "1--12",
    abstract = "Pretrained deep contextual representations have advanced the state-of-the-art on various commonsense NLP tasks, but we lack a concrete understanding of the capability of these models. Thus, we investigate and challenge several aspects of BERT's commonsense representation abilities. First, we probe BERT's ability to classify various object attributes, demonstrating that BERT shows a strong ability in encoding various commonsense features in its embedding space, but is still deficient in many areas. Next, we show that, by augmenting BERT's pretraining data with additional data related to the deficient attributes, we are able to improve performance on a downstream commonsense reasoning task while using a minimal amount of data. Finally, we develop a method of fine-tuning knowledge graph embeddings alongside BERT and show the continued importance of explicit knowledge graphs."
}