@inproceedings{nguyen-huynh-2022-dangnt,
    title     = {{DANGNT}-{SGU} at {S}em{E}val-2022 Task 11: Using Pre-trained Language Model for Complex Named Entity Recognition},
    author    = {Nguyen, Dang and
                 Huynh, Huy Khac Nguyen},
    editor    = {Emerson, Guy and
                 Schluter, Natalie and
                 Stanovsky, Gabriel and
                 Kumar, Ritesh and
                 Palmer, Alexis and
                 Schneider, Nathan and
                 Singh, Siddharth and
                 Ratan, Shyam},
    booktitle = {Proceedings of the 16th International Workshop on Semantic Evaluation (SemEval-2022)},
    month     = jul,
    year      = {2022},
    address   = {Seattle, United States},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2022.semeval-1.203/},
    doi       = {10.18653/v1/2022.semeval-1.203},
    pages     = {1483--1487},
    abstract  = {In this paper, we describe a system that we built to participate in the SemEval 2022 Task 11: MultiCoNER Multilingual Complex Named Entity Recognition, specifically the track Mono-lingual in English. To construct this system, we used Pre-trained Language Models (PLMs). Especially, the Pre-trained Model base on BERT is applied for the task of recognizing named entities by fine-tuning method. We performed the evaluation on two test datasets of the shared task: the Practice Phase and the Evaluation Phase of the competition.},
}
Markdown (Informal)
[DANGNT-SGU at SemEval-2022 Task 11: Using Pre-trained Language Model for Complex Named Entity Recognition](https://aclanthology.org/2022.semeval-1.203/) (Nguyen & Huynh, SemEval 2022)
ACL