@inproceedings{huang-etal-2021-improving,
    title     = {Improving Unsupervised Commonsense Reasoning Using Knowledge-Enabled {Natural Language Inference}},
    author    = {Huang, Canming and
      He, Weinan and
      Liu, Yongmei},
    editor    = {Moens, Marie-Francine and
      Huang, Xuanjing and
      Specia, Lucia and
      Yih, Scott Wen-tau},
    booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2021},
    month     = nov,
    year      = {2021},
    address   = {Punta Cana, Dominican Republic},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2021.findings-emnlp.420/},
    doi       = {10.18653/v1/2021.findings-emnlp.420},
    pages     = {4875--4885},
    abstract  = {Recent methods based on pre-trained language models have shown strong supervised performance on commonsense reasoning. However, they rely on expensive data annotation and time-consuming training. Thus, we focus on unsupervised commonsense reasoning. We show the effectiveness of using a common framework, Natural Language Inference (NLI), to solve diverse commonsense reasoning tasks. By leveraging transfer learning from large NLI datasets, and injecting crucial knowledge from commonsense sources such as ATOMIC 2020 and ConceptNet, our method achieved state-of-the-art unsupervised performance on two commonsense reasoning tasks: WinoWhy and CommonsenseQA. Further analysis demonstrated the benefits of multiple categories of knowledge, but problems about quantities and antonyms are still challenging.},
}
Markdown (Informal)
[Improving Unsupervised Commonsense Reasoning Using Knowledge-Enabled Natural Language Inference](https://aclanthology.org/2021.findings-emnlp.420/) (Huang et al., Findings 2021)
ACL