@inproceedings{patil-baths-2020-cnrl,
    title     = {{CNRL} at {S}em{E}val-2020 Task 5: Modelling Causal Reasoning in Language with Multi-Head Self-Attention Weights Based Counterfactual Detection},
    author    = {Patil, Rajaswa and
                 Baths, Veeky},
    editor    = {Herbelot, Aurelie and
                 Zhu, Xiaodan and
                 Palmer, Alexis and
                 Schneider, Nathan and
                 May, Jonathan and
                 Shutova, Ekaterina},
    booktitle = {Proceedings of the Fourteenth Workshop on Semantic Evaluation},
    month     = dec,
    year      = {2020},
    address   = {Barcelona (online)},
    publisher = {International Committee for Computational Linguistics},
    url       = {https://aclanthology.org/2020.semeval-1.55/},
    doi       = {10.18653/v1/2020.semeval-1.55},
    pages     = {451--457},
    abstract  = {In this paper, we describe an approach for modelling causal reasoning in natural language by detecting counterfactuals in text using multi-head self-attention weights. We use pre-trained transformer models to extract contextual embeddings and self-attention weights from the text. We show the use of convolutional layers to extract task-specific features from these self-attention weights. Further, we describe a fine-tuning approach with a common base model for knowledge sharing between the two closely related sub-tasks for counterfactual detection. We analyze and compare the performance of various transformer models in our experiments. Finally, we perform a qualitative analysis with the multi-head self-attention weights to interpret our models' dynamics.}
}
Markdown (Informal)
[CNRL at SemEval-2020 Task 5: Modelling Causal Reasoning in Language with Multi-Head Self-Attention Weights Based Counterfactual Detection](https://aclanthology.org/2020.semeval-1.55/) (Patil & Baths, SemEval 2020)
ACL