@inproceedings{abi-akl-etal-2020-yseop,
title = "Yseop at {S}em{E}val-2020 Task 5: Cascaded {BERT} Language Model for Counterfactual Statement Analysis",
author = "Abi-Akl, Hanna and
Mariko, Dominique and
Labidurie, Estelle",
booktitle = "Proceedings of the Fourteenth Workshop on Semantic Evaluation",
month = dec,
year = "2020",
address = "Barcelona (online)",
publisher = "International Committee for Computational Linguistics",
url = "https://aclanthology.org/2020.semeval-1.57",
doi = "10.18653/v1/2020.semeval-1.57",
pages = "468--478",
abstract = "In this paper, we explore strategies to detect and evaluate counterfactual sentences. We describe our system for SemEval-2020 Task 5: Modeling Causal Reasoning in Language: Detecting Counterfactuals. We use a BERT base model for the classification task and build a hybrid BERT Multi-Layer Perceptron system to handle the sequence identification task. Our experiments show that while introducing syntactic and semantic features does little in improving the system in the classification task, using these types of features as cascaded linear inputs to fine-tune the sequence-delimiting ability of the model ensures it outperforms other similar-purpose complex systems like BiLSTM-CRF in the second task. Our system achieves an F1 score of 85.00{\%} in Task 1 and 83.90{\%} in Task 2.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="abi-akl-etal-2020-yseop">
    <titleInfo>
      <title>Yseop at SemEval-2020 Task 5: Cascaded BERT Language Model for Counterfactual Statement Analysis</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Hanna</namePart>
      <namePart type="family">Abi-Akl</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Dominique</namePart>
      <namePart type="family">Mariko</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Estelle</namePart>
      <namePart type="family">Labidurie</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Fourteenth Workshop on Semantic Evaluation</title>
      </titleInfo>
      <originInfo>
        <publisher>International Committee for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Barcelona (online)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In this paper, we explore strategies to detect and evaluate counterfactual sentences. We describe our system for SemEval-2020 Task 5: Modeling Causal Reasoning in Language: Detecting Counterfactuals. We use a BERT base model for the classification task and build a hybrid BERT Multi-Layer Perceptron system to handle the sequence identification task. Our experiments show that while introducing syntactic and semantic features does little in improving the system in the classification task, using these types of features as cascaded linear inputs to fine-tune the sequence-delimiting ability of the model ensures it outperforms other similar-purpose complex systems like BiLSTM-CRF in the second task. Our system achieves an F1 score of 85.00% in Task 1 and 83.90% in Task 2.</abstract>
    <identifier type="citekey">abi-akl-etal-2020-yseop</identifier>
    <identifier type="doi">10.18653/v1/2020.semeval-1.57</identifier>
    <location>
      <url>https://aclanthology.org/2020.semeval-1.57</url>
    </location>
    <part>
      <date>2020-12</date>
      <extent unit="page">
        <start>468</start>
        <end>478</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Yseop at SemEval-2020 Task 5: Cascaded BERT Language Model for Counterfactual Statement Analysis
%A Abi-Akl, Hanna
%A Mariko, Dominique
%A Labidurie, Estelle
%S Proceedings of the Fourteenth Workshop on Semantic Evaluation
%D 2020
%8 dec
%I International Committee for Computational Linguistics
%C Barcelona (online)
%F abi-akl-etal-2020-yseop
%X In this paper, we explore strategies to detect and evaluate counterfactual sentences. We describe our system for SemEval-2020 Task 5: Modeling Causal Reasoning in Language: Detecting Counterfactuals. We use a BERT base model for the classification task and build a hybrid BERT Multi-Layer Perceptron system to handle the sequence identification task. Our experiments show that while introducing syntactic and semantic features does little in improving the system in the classification task, using these types of features as cascaded linear inputs to fine-tune the sequence-delimiting ability of the model ensures it outperforms other similar-purpose complex systems like BiLSTM-CRF in the second task. Our system achieves an F1 score of 85.00% in Task 1 and 83.90% in Task 2.
%R 10.18653/v1/2020.semeval-1.57
%U https://aclanthology.org/2020.semeval-1.57
%U https://doi.org/10.18653/v1/2020.semeval-1.57
%P 468-478
Markdown (Informal)
[Yseop at SemEval-2020 Task 5: Cascaded BERT Language Model for Counterfactual Statement Analysis](https://aclanthology.org/2020.semeval-1.57) (Abi-Akl et al., SemEval 2020)
ACL
Hanna Abi-Akl, Dominique Mariko, and Estelle Labidurie. 2020. Yseop at SemEval-2020 Task 5: Cascaded BERT Language Model for Counterfactual Statement Analysis. In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 468–478, Barcelona (online). International Committee for Computational Linguistics.