@inproceedings{chen-etal-2023-ncuee-nlp,
title = "{NCUEE}-{NLP} at {S}em{E}val-2023 Task 7: Ensemble Biomedical {L}ink{BERT} Transformers in Multi-evidence Natural Language Inference for Clinical Trial Data",
author = "Chen, Chao-Yi and
Tien, Kao-Yuan and
Cheng, Yuan-Hao and
Lee, Lung-Hao",
editor = {Ojha, Atul Kr. and
Do{\u{g}}ru{\"o}z, A. Seza and
Da San Martino, Giovanni and
Tayyar Madabushi, Harish and
Kumar, Ritesh and
Sartori, Elisa},
booktitle = "Proceedings of the 17th International Workshop on Semantic Evaluation (SemEval-2023)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.semeval-1.107/",
doi = "10.18653/v1/2023.semeval-1.107",
pages = "776--781",
abstract = "This study describes the model design of the NCUEE-NLP system for the SemEval-2023 NLI4CT task that focuses on multi-evidence natural language inference for clinical trial data. We use the LinkBERT transformer in the biomedical domain (denoted as BioLinkBERT) as our main system architecture. First, a set of sentences in clinical trial reports is extracted as evidence for premise-statement inference. This identified evidence is then used to determine the inference relation (i.e., entailment or contradiction). Finally, a soft voting ensemble mechanism is applied to enhance the system performance. For Subtask 1 on textual entailment, our best submission had an F1-score of 0.7091, ranking sixth among all 30 participating teams. For Subtask 2 on evidence retrieval, our best result obtained an F1-score of 0.7940, ranking ninth of 19 submissions."
}
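
For orientation, a minimal sketch of the soft-voting ensemble step mentioned in the abstract (averaging class probabilities from several fine-tuned BioLinkBERT classifiers and taking the argmax) might look like the following. The fine-tuned checkpoint names and the label order are assumptions for illustration only, not taken from the paper; only the base `michiyasunaga/BioLinkBERT-base` model name is a real Hugging Face checkpoint.

```python
# Illustrative soft-voting ensemble for premise-statement inference,
# in the spirit of the system described in the abstract above.
# The fine-tuned checkpoint paths and label order are hypothetical.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

CHECKPOINTS = [  # hypothetical fine-tuned BioLinkBERT models (e.g., different seeds)
    "bio-linkbert-nli4ct-seed1",
    "bio-linkbert-nli4ct-seed2",
    "bio-linkbert-nli4ct-seed3",
]
LABELS = ["Entailment", "Contradiction"]  # assumed label order

def soft_vote(premise: str, statement: str) -> str:
    tokenizer = AutoTokenizer.from_pretrained("michiyasunaga/BioLinkBERT-base")
    probs = []
    for path in CHECKPOINTS:
        model = AutoModelForSequenceClassification.from_pretrained(path)
        model.eval()
        inputs = tokenizer(premise, statement, truncation=True, return_tensors="pt")
        with torch.no_grad():
            logits = model(**inputs).logits
        probs.append(torch.softmax(logits, dim=-1))
    # Soft voting: average the class probabilities across ensemble members.
    avg = torch.stack(probs).mean(dim=0)
    return LABELS[int(avg.argmax(dim=-1))]
```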