@inproceedings{fajcik-etal-2020-fit,
    title     = {{BUT}-{FIT} at {S}em{E}val-2020 Task 5: Automatic Detection of Counterfactual Statements with Deep Pre-trained Language Representation Models},
    author    = {Fajcik, Martin and
                 Jon, Josef and
                 Docekal, Martin and
                 Smrz, Pavel},
    editor    = {Herbelot, Aurelie and
                 Zhu, Xiaodan and
                 Palmer, Alexis and
                 Schneider, Nathan and
                 May, Jonathan and
                 Shutova, Ekaterina},
    booktitle = {Proceedings of the Fourteenth Workshop on Semantic Evaluation},
    month     = dec,
    year      = {2020},
    address   = {Barcelona (online)},
    publisher = {International Committee for Computational Linguistics},
    url       = {https://aclanthology.org/2020.semeval-1.53/},
    doi       = {10.18653/v1/2020.semeval-1.53},
    pages     = {437--444},
    abstract  = {This paper describes BUT-FIT{'}s submission at SemEval-2020 Task 5: Modelling Causal Reasoning in Language: Detecting Counterfactuals. The challenge focused on detecting whether a given statement contains a counterfactual (Subtask 1) and extracting both antecedent and consequent parts of the counterfactual from the text (Subtask 2). We experimented with various state-of-the-art language representation models (LRMs). We found RoBERTa LRM to perform the best in both subtasks. We achieved the first place in both exact match and F1 for Subtask 2 and ranked second for Subtask 1.},
}
Markdown (Informal)
[BUT-FIT at SemEval-2020 Task 5: Automatic Detection of Counterfactual Statements with Deep Pre-trained Language Representation Models](https://aclanthology.org/2020.semeval-1.53/) (Fajcik et al., SemEval 2020)
ACL