@inproceedings{khandelwal-sawant-2020-negbert,
title = "{N}eg{BERT}: A Transfer Learning Approach for Negation Detection and Scope Resolution",
author = "Khandelwal, Aditya and
Sawant, Suraj",
booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
month = may,
year = "2020",
address = "Marseille, France",
publisher = "European Language Resources Association",
url = "https://aclanthology.org/2020.lrec-1.704",
pages = "5739--5748",
abstract = "Negation is an important characteristic of language, and a major component of information extraction from text. This subtask is of considerable importance to the biomedical domain. Over the years, multiple approaches have been explored to address this problem: Rule-based systems, Machine Learning classifiers, Conditional Random Field models, CNNs and more recently BiLSTMs. In this paper, we look at applying Transfer Learning to this problem. First, we extensively review previous literature addressing Negation Detection and Scope Resolution across the 3 datasets that have gained popularity over the years: the BioScope Corpus, the Sherlock dataset, and the SFU Review Corpus. We then explore the decision choices involved with using BERT, a popular transfer learning model, for this task, and report state-of-the-art results for scope resolution across all 3 datasets. Our model, referred to as NegBERT, achieves a token level F1 score on scope resolution of 92.36 on the Sherlock dataset, 95.68 on the BioScope Abstracts subcorpus, 91.24 on the BioScope Full Papers subcorpus, 90.95 on the SFU Review Corpus, outperforming the previous state-of-the-art systems by a significant margin. We also analyze the model{'}s generalizability to datasets on which it is not trained.",
language = "English",
ISBN = "979-10-95546-34-4",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="khandelwal-sawant-2020-negbert">
    <titleInfo>
      <title>NegBERT: A Transfer Learning Approach for Negation Detection and Scope Resolution</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Aditya</namePart>
      <namePart type="family">Khandelwal</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Suraj</namePart>
      <namePart type="family">Sawant</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-may</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <language>
      <languageTerm type="text">English</languageTerm>
      <languageTerm type="code" authority="iso639-2b">eng</languageTerm>
    </language>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 12th Language Resources and Evaluation Conference</title>
      </titleInfo>
      <originInfo>
        <publisher>European Language Resources Association</publisher>
        <place>
          <placeTerm type="text">Marseille, France</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-10-95546-34-4</identifier>
    </relatedItem>
    <abstract>Negation is an important characteristic of language, and a major component of information extraction from text. This subtask is of considerable importance to the biomedical domain. Over the years, multiple approaches have been explored to address this problem: Rule-based systems, Machine Learning classifiers, Conditional Random Field models, CNNs and more recently BiLSTMs. In this paper, we look at applying Transfer Learning to this problem. First, we extensively review previous literature addressing Negation Detection and Scope Resolution across the 3 datasets that have gained popularity over the years: the BioScope Corpus, the Sherlock dataset, and the SFU Review Corpus. We then explore the decision choices involved with using BERT, a popular transfer learning model, for this task, and report state-of-the-art results for scope resolution across all 3 datasets. Our model, referred to as NegBERT, achieves a token level F1 score on scope resolution of 92.36 on the Sherlock dataset, 95.68 on the BioScope Abstracts subcorpus, 91.24 on the BioScope Full Papers subcorpus, 90.95 on the SFU Review Corpus, outperforming the previous state-of-the-art systems by a significant margin. We also analyze the model’s generalizability to datasets on which it is not trained.</abstract>
    <identifier type="citekey">khandelwal-sawant-2020-negbert</identifier>
    <location>
      <url>https://aclanthology.org/2020.lrec-1.704</url>
    </location>
    <part>
      <date>2020-may</date>
      <extent unit="page">
        <start>5739</start>
        <end>5748</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T NegBERT: A Transfer Learning Approach for Negation Detection and Scope Resolution
%A Khandelwal, Aditya
%A Sawant, Suraj
%S Proceedings of the 12th Language Resources and Evaluation Conference
%D 2020
%8 may
%I European Language Resources Association
%C Marseille, France
%@ 979-10-95546-34-4
%G English
%F khandelwal-sawant-2020-negbert
%X Negation is an important characteristic of language, and a major component of information extraction from text. This subtask is of considerable importance to the biomedical domain. Over the years, multiple approaches have been explored to address this problem: Rule-based systems, Machine Learning classifiers, Conditional Random Field models, CNNs and more recently BiLSTMs. In this paper, we look at applying Transfer Learning to this problem. First, we extensively review previous literature addressing Negation Detection and Scope Resolution across the 3 datasets that have gained popularity over the years: the BioScope Corpus, the Sherlock dataset, and the SFU Review Corpus. We then explore the decision choices involved with using BERT, a popular transfer learning model, for this task, and report state-of-the-art results for scope resolution across all 3 datasets. Our model, referred to as NegBERT, achieves a token level F1 score on scope resolution of 92.36 on the Sherlock dataset, 95.68 on the BioScope Abstracts subcorpus, 91.24 on the BioScope Full Papers subcorpus, 90.95 on the SFU Review Corpus, outperforming the previous state-of-the-art systems by a significant margin. We also analyze the model’s generalizability to datasets on which it is not trained.
%U https://aclanthology.org/2020.lrec-1.704
%P 5739-5748
Markdown (Informal)
[NegBERT: A Transfer Learning Approach for Negation Detection and Scope Resolution](https://aclanthology.org/2020.lrec-1.704) (Khandelwal & Sawant, LREC 2020)
ACL
Aditya Khandelwal and Suraj Sawant. 2020. NegBERT: A Transfer Learning Approach for Negation Detection and Scope Resolution. In Proceedings of the 12th Language Resources and Evaluation Conference, pages 5739–5748, Marseille, France. European Language Resources Association.
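
For readers who want a concrete picture of the approach the abstract describes — negation scope resolution cast as token classification over a pre-trained BERT — here is a minimal sketch using the Hugging Face `transformers` library. It is not the authors' released code: the `[CUE]` marker token, the binary in-scope/out-of-scope label scheme, and the `bert-base-uncased` checkpoint are illustrative assumptions, and the classification head below is untrained, so real use requires fine-tuning on one of the three corpora.

```python
# Minimal sketch of NegBERT-style scope resolution as token classification.
# Assumptions (not taken from the paper): the "[CUE]" marker token, the binary
# in-scope/out-of-scope label scheme, and the bert-base-uncased checkpoint.
import torch
from transformers import BertTokenizerFast, BertForTokenClassification

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
# Register the cue marker so it is kept as a single token.
tokenizer.add_special_tokens({"additional_special_tokens": ["[CUE]"]})

# Label 0 = outside the negation scope, label 1 = inside it (assumed scheme).
model = BertForTokenClassification.from_pretrained("bert-base-uncased", num_labels=2)
model.resize_token_embeddings(len(tokenizer))

words = ["I", "do", "not", "like", "green", "eggs", "and", "ham", "."]
cue_index = 2  # gold negation cue ("not"); scope resolution assumes a known cue

# Mark the cue in the input so the model knows which negation to resolve.
marked = ["[CUE]" if i == cue_index else w for i, w in enumerate(words)]
enc = tokenizer(marked, is_split_into_words=True, return_tensors="pt")

with torch.no_grad():
    logits = model(**enc).logits  # shape (1, seq_len, 2); head untrained here

# Collapse subword predictions back to word-level scope labels.
pred = logits.argmax(dim=-1)[0].tolist()
in_scope = {wi for wi, p in zip(enc.word_ids(0), pred) if wi is not None and p == 1}
print("predicted scope tokens:", [w for i, w in enumerate(words) if i in in_scope])
```

After fine-tuning on, e.g., the BioScope Abstracts subcorpus, the printed tokens would correspond to the predicted negation scope, which is what the paper's token-level F1 scores evaluate.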