@inproceedings{zeng-zubiaga-2021-qmul,
    title     = {{QMUL}-{SDS} at {SCIVER}: Step-by-Step Binary Classification for Scientific Claim Verification},
    author    = {Zeng, Xia and Zubiaga, Arkaitz},
    booktitle = {Proceedings of the Second Workshop on Scholarly Document Processing},
    month     = jun,
    year      = {2021},
    address   = {Online},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2021.sdp-1.15},
    doi       = {10.18653/v1/2021.sdp-1.15},
    pages     = {116--123},
    abstract  = {Scientific claim verification is a unique challenge that is attracting increasing interest. The SCIVER shared task offers a benchmark scenario to test and compare claim verification approaches by participating teams and consists in three steps: relevant abstract selection, rationale selection and label prediction. In this paper, we present team QMUL-SDS{'}s participation in the shared task. We propose an approach that performs scientific claim verification by doing binary classifications step-by-step. We trained a BioBERT-large classifier to select abstracts based on pairwise relevance assessments for each {\textless}claim, title of the abstract{\textgreater} and continued to train it to select rationales out of each retrieved abstract based on {\textless}claim, sentence{\textgreater}. We then propose a two-step setting for label prediction, i.e. first predicting {``}NOT{\_}ENOUGH{\_}INFO{''} or {``}ENOUGH{\_}INFO{''}, then label those marked as {``}ENOUGH{\_}INFO{''} as either {``}SUPPORT{''} or {``}CONTRADICT{''}. Compared to the baseline system, we achieve substantial improvements on the dev set. As a result, our team is the No. 4 team on the leaderboard.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zeng-zubiaga-2021-qmul">
<titleInfo>
<title>QMUL-SDS at SCIVER: Step-by-Step Binary Classification for Scientific Claim Verification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Xia</namePart>
<namePart type="family">Zeng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Arkaitz</namePart>
<namePart type="family">Zubiaga</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Scholarly Document Processing</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Scientific claim verification is a unique challenge that is attracting increasing interest. The SCIVER shared task offers a benchmark scenario to test and compare claim verification approaches by participating teams and consists in three steps: relevant abstract selection, rationale selection and label prediction. In this paper, we present team QMUL-SDS’s participation in the shared task. We propose an approach that performs scientific claim verification by doing binary classifications step-by-step. We trained a BioBERT-large classifier to select abstracts based on pairwise relevance assessments for each &lt;claim, title of the abstract&gt; and continued to train it to select rationales out of each retrieved abstract based on &lt;claim, sentence&gt;. We then propose a two-step setting for label prediction, i.e. first predicting “NOT_ENOUGH_INFO” or “ENOUGH_INFO”, then label those marked as “ENOUGH_INFO” as either “SUPPORT” or “CONTRADICT”. Compared to the baseline system, we achieve substantial improvements on the dev set. As a result, our team is the No. 4 team on the leaderboard.</abstract>
<identifier type="citekey">zeng-zubiaga-2021-qmul</identifier>
<identifier type="doi">10.18653/v1/2021.sdp-1.15</identifier>
<location>
<url>https://aclanthology.org/2021.sdp-1.15</url>
</location>
<part>
<date>2021-06</date>
<extent unit="page">
<start>116</start>
<end>123</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T QMUL-SDS at SCIVER: Step-by-Step Binary Classification for Scientific Claim Verification
%A Zeng, Xia
%A Zubiaga, Arkaitz
%S Proceedings of the Second Workshop on Scholarly Document Processing
%D 2021
%8 jun
%I Association for Computational Linguistics
%C Online
%F zeng-zubiaga-2021-qmul
%X Scientific claim verification is a unique challenge that is attracting increasing interest. The SCIVER shared task offers a benchmark scenario to test and compare claim verification approaches by participating teams and consists in three steps: relevant abstract selection, rationale selection and label prediction. In this paper, we present team QMUL-SDS’s participation in the shared task. We propose an approach that performs scientific claim verification by doing binary classifications step-by-step. We trained a BioBERT-large classifier to select abstracts based on pairwise relevance assessments for each <claim, title of the abstract> and continued to train it to select rationales out of each retrieved abstract based on <claim, sentence>. We then propose a two-step setting for label prediction, i.e. first predicting “NOT_ENOUGH_INFO” or “ENOUGH_INFO”, then label those marked as “ENOUGH_INFO” as either “SUPPORT” or “CONTRADICT”. Compared to the baseline system, we achieve substantial improvements on the dev set. As a result, our team is the No. 4 team on the leaderboard.
%R 10.18653/v1/2021.sdp-1.15
%U https://aclanthology.org/2021.sdp-1.15
%U https://doi.org/10.18653/v1/2021.sdp-1.15
%P 116-123
Markdown (Informal)
[QMUL-SDS at SCIVER: Step-by-Step Binary Classification for Scientific Claim Verification](https://aclanthology.org/2021.sdp-1.15) (Zeng & Zubiaga, sdp 2021)
ACL