@inproceedings{klein-nabi-2021-attention-based,
title = "Attention-based Contrastive Learning for {W}inograd Schemas",
author = "Klein, Tassilo and
Nabi, Moin",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.findings-emnlp.208",
doi = "10.18653/v1/2021.findings-emnlp.208",
pages = "2428--2434",
abstract = "Self-supervised learning has recently attracted considerable attention in the NLP community for its ability to learn discriminative features using a contrastive objective. This paper investigates whether contrastive learning can be extended to Transfomer attention to tackling the Winograd Schema Challenge. To this end, we propose a novel self-supervised framework, leveraging a contrastive loss directly at the level of self-attention. Experimental analysis of our attention-based models on multiple datasets demonstrates superior commonsense reasoning capabilities. The proposed approach outperforms all comparable unsupervised approaches while occasionally surpassing supervised ones.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="klein-nabi-2021-attention-based">
    <titleInfo>
      <title>Attention-based Contrastive Learning for Winograd Schemas</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Tassilo</namePart>
      <namePart type="family">Klein</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Moin</namePart>
      <namePart type="family">Nabi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2021</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Punta Cana, Dominican Republic</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Self-supervised learning has recently attracted considerable attention in the NLP community for its ability to learn discriminative features using a contrastive objective. This paper investigates whether contrastive learning can be extended to Transformer attention to tackle the Winograd Schema Challenge. To this end, we propose a novel self-supervised framework, leveraging a contrastive loss directly at the level of self-attention. Experimental analysis of our attention-based models on multiple datasets demonstrates superior commonsense reasoning capabilities. The proposed approach outperforms all comparable unsupervised approaches while occasionally surpassing supervised ones.</abstract>
    <identifier type="citekey">klein-nabi-2021-attention-based</identifier>
    <identifier type="doi">10.18653/v1/2021.findings-emnlp.208</identifier>
    <location>
      <url>https://aclanthology.org/2021.findings-emnlp.208</url>
    </location>
    <part>
      <date>2021-11</date>
      <extent unit="page">
        <start>2428</start>
        <end>2434</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Attention-based Contrastive Learning for Winograd Schemas
%A Klein, Tassilo
%A Nabi, Moin
%S Findings of the Association for Computational Linguistics: EMNLP 2021
%D 2021
%8 nov
%I Association for Computational Linguistics
%C Punta Cana, Dominican Republic
%F klein-nabi-2021-attention-based
%X Self-supervised learning has recently attracted considerable attention in the NLP community for its ability to learn discriminative features using a contrastive objective. This paper investigates whether contrastive learning can be extended to Transformer attention to tackle the Winograd Schema Challenge. To this end, we propose a novel self-supervised framework, leveraging a contrastive loss directly at the level of self-attention. Experimental analysis of our attention-based models on multiple datasets demonstrates superior commonsense reasoning capabilities. The proposed approach outperforms all comparable unsupervised approaches while occasionally surpassing supervised ones.
%R 10.18653/v1/2021.findings-emnlp.208
%U https://aclanthology.org/2021.findings-emnlp.208
%U https://doi.org/10.18653/v1/2021.findings-emnlp.208
%P 2428-2434
Markdown (Informal)
[Attention-based Contrastive Learning for Winograd Schemas](https://aclanthology.org/2021.findings-emnlp.208) (Klein & Nabi, Findings 2021)
ACL
Tassilo Klein and Moin Nabi. 2021. Attention-based Contrastive Learning for Winograd Schemas. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 2428–2434, Punta Cana, Dominican Republic. Association for Computational Linguistics.
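
The abstract's central idea, a contrastive loss applied directly to self-attention, can be sketched as follows. This is a minimal, hypothetical illustration under assumed inputs, not the authors' implementation: the helper name attention_contrastive_loss, the tensor shapes, and the temperature value are all assumptions made for the example. It merely shows one way a contrastive objective could push a pronoun's attention mass toward the correct antecedent and away from a distractor in a Winograd-style sentence pair.

```python
# Hypothetical sketch (not the paper's code): a contrastive objective
# over self-attention weights. We assume the attention mass the ambiguous
# pronoun pays to each of two candidate antecedents has already been
# extracted from a Transformer and averaged over heads.
import torch
import torch.nn.functional as F

def attention_contrastive_loss(attn_correct: torch.Tensor,
                               attn_distractor: torch.Tensor,
                               temperature: float = 0.1) -> torch.Tensor:
    """attn_correct / attn_distractor: (batch,) attention mass from the
    pronoun token to the correct candidate span and to the distractor."""
    # Treat the two attention masses as logits of a 2-way classification.
    logits = torch.stack([attn_correct, attn_distractor], dim=1) / temperature
    # The positive (correct candidate) is always at index 0.
    targets = torch.zeros(logits.size(0), dtype=torch.long)
    return F.cross_entropy(logits, targets)

# Toy usage with random attention masses for a batch of 4 schemas:
attn_c = torch.rand(4)  # pronoun -> correct antecedent
attn_d = torch.rand(4)  # pronoun -> distractor
print(attention_contrastive_loss(attn_c, attn_d).item())
```

Minimizing this loss increases the relative attention on the correct candidate, which is one plausible reading of training "directly at the level of self-attention"; the paper itself should be consulted for the actual formulation.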