@inproceedings{datta-roberts-2020-hybrid,
    title = "A Hybrid Deep Learning Approach for Spatial Trigger Extraction from Radiology Reports",
    author = "Datta, Surabhi and
      Roberts, Kirk",
    booktitle = "Proceedings of the Third International Workshop on Spatial Language Understanding",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.splu-1.6",
    doi = "10.18653/v1/2020.splu-1.6",
    pages = "50--55",
    abstract = "Radiology reports contain important clinical information about patients which are often tied through spatial expressions. Spatial expressions (or triggers) are mainly used to describe the positioning of radiographic findings or medical devices with respect to some anatomical structures. As the expressions result from the mental visualization of the radiologist{'}s interpretations, they are varied and complex. The focus of this work is to automatically identify the spatial expression terms from three different radiology sub-domains. We propose a hybrid deep learning-based NLP method that includes {--} 1) generating a set of candidate spatial triggers by exact match with the known trigger terms from the training data, 2) applying domain-specific constraints to filter the candidate triggers, and 3) utilizing a BERT-based classifier to predict whether a candidate trigger is a true spatial trigger or not. The results are promising, with an improvement of 24 points in the average F1 measure compared to a standard BERT-based sequence labeler.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="datta-roberts-2020-hybrid">
    <titleInfo>
      <title>A Hybrid Deep Learning Approach for Spatial Trigger Extraction from Radiology Reports</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Surabhi</namePart>
      <namePart type="family">Datta</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Kirk</namePart>
      <namePart type="family">Roberts</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-nov</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Third International Workshop on Spatial Language Understanding</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Radiology reports contain important clinical information about patients which are often tied through spatial expressions. Spatial expressions (or triggers) are mainly used to describe the positioning of radiographic findings or medical devices with respect to some anatomical structures. As the expressions result from the mental visualization of the radiologist’s interpretations, they are varied and complex. The focus of this work is to automatically identify the spatial expression terms from three different radiology sub-domains. We propose a hybrid deep learning-based NLP method that includes – 1) generating a set of candidate spatial triggers by exact match with the known trigger terms from the training data, 2) applying domain-specific constraints to filter the candidate triggers, and 3) utilizing a BERT-based classifier to predict whether a candidate trigger is a true spatial trigger or not. The results are promising, with an improvement of 24 points in the average F1 measure compared to a standard BERT-based sequence labeler.</abstract>
    <identifier type="citekey">datta-roberts-2020-hybrid</identifier>
    <identifier type="doi">10.18653/v1/2020.splu-1.6</identifier>
    <location>
      <url>https://aclanthology.org/2020.splu-1.6</url>
    </location>
    <part>
      <date>2020-nov</date>
      <extent unit="page">
        <start>50</start>
        <end>55</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T A Hybrid Deep Learning Approach for Spatial Trigger Extraction from Radiology Reports
%A Datta, Surabhi
%A Roberts, Kirk
%S Proceedings of the Third International Workshop on Spatial Language Understanding
%D 2020
%8 nov
%I Association for Computational Linguistics
%C Online
%F datta-roberts-2020-hybrid
%X Radiology reports contain important clinical information about patients which are often tied through spatial expressions. Spatial expressions (or triggers) are mainly used to describe the positioning of radiographic findings or medical devices with respect to some anatomical structures. As the expressions result from the mental visualization of the radiologist’s interpretations, they are varied and complex. The focus of this work is to automatically identify the spatial expression terms from three different radiology sub-domains. We propose a hybrid deep learning-based NLP method that includes – 1) generating a set of candidate spatial triggers by exact match with the known trigger terms from the training data, 2) applying domain-specific constraints to filter the candidate triggers, and 3) utilizing a BERT-based classifier to predict whether a candidate trigger is a true spatial trigger or not. The results are promising, with an improvement of 24 points in the average F1 measure compared to a standard BERT-based sequence labeler.
%R 10.18653/v1/2020.splu-1.6
%U https://aclanthology.org/2020.splu-1.6
%U https://doi.org/10.18653/v1/2020.splu-1.6
%P 50-55
Markdown (Informal)
[A Hybrid Deep Learning Approach for Spatial Trigger Extraction from Radiology Reports](https://aclanthology.org/2020.splu-1.6) (Datta & Roberts, SpLU 2020)
ACL
Surabhi Datta and Kirk Roberts. 2020. [A Hybrid Deep Learning Approach for Spatial Trigger Extraction from Radiology Reports](https://aclanthology.org/2020.splu-1.6). In *Proceedings of the Third International Workshop on Spatial Language Understanding*, pages 50–55, Online. Association for Computational Linguistics.
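
The abstract (repeated in each record above) outlines a three-step hybrid method: generate candidate spatial triggers by exact match against trigger terms seen in training, filter the candidates with domain-specific constraints, and let a BERT-based binary classifier decide whether each remaining candidate is a true spatial trigger. The sketch below is a minimal, hypothetical Python rendering of that control flow only; the trigger lexicon, the date-based filter rule, and the classifier stub are illustrative assumptions, not the paper's actual resources or model.

```python
# Hypothetical sketch (not the authors' code) of the three-step pipeline the
# abstract describes: exact-match candidate generation, domain-constraint
# filtering, and a binary classification step over surviving candidates.
import re
from typing import List, Tuple

# Illustrative trigger lexicon; in the paper it is collected from the
# training-set annotations, not hand-written like this.
KNOWN_TRIGGERS = {"in", "at", "within", "along", "adjacent to", "overlying"}

Span = Tuple[int, int, str]  # (start offset, end offset, matched text)


def generate_candidates(report: str) -> List[Span]:
    """Step 1: exact match of known trigger terms against the report text."""
    candidates: List[Span] = []
    for trigger in sorted(KNOWN_TRIGGERS):
        pattern = r"\b" + re.escape(trigger) + r"\b"
        for m in re.finditer(pattern, report, flags=re.IGNORECASE):
            candidates.append((m.start(), m.end(), m.group(0)))
    return sorted(candidates)


def passes_constraints(report: str, span: Span) -> bool:
    """Step 2: domain-specific filtering. The rule here (drop candidates that
    sit next to a date) is a made-up stand-in for the paper's constraints."""
    start, end, _ = span
    context = report[max(0, start - 20):end + 20]
    return re.search(r"\d{1,2}/\d{1,2}/\d{2,4}", context) is None


def classify_candidate(report: str, span: Span) -> bool:
    """Step 3: placeholder for the fine-tuned BERT binary classifier that
    scores each candidate in context as spatial vs. non-spatial."""
    return True  # stub: accept everything; a real system would run the model


def extract_spatial_triggers(report: str) -> List[Span]:
    """Run the three steps in order and return predicted spatial triggers."""
    candidates = generate_candidates(report)
    filtered = [s for s in candidates if passes_constraints(report, s)]
    return [s for s in filtered if classify_candidate(report, s)]


if __name__ == "__main__":
    text = "Small effusion in the left pleural space overlying the diaphragm."
    print(extract_spatial_triggers(text))
```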