@inproceedings{singh-etal-2022-silpa,
title = "silpa{\_}nlp at {S}em{E}val-2022 Tasks 11: Transformer based {NER} models for {H}indi and {B}angla languages",
author = "Singh, Sumit and
Jawale, Pawankumar and
Tiwary, Uma",
editor = "Emerson, Guy and
Schluter, Natalie and
Stanovsky, Gabriel and
Kumar, Ritesh and
Palmer, Alexis and
Schneider, Nathan and
Singh, Siddharth and
Ratan, Shyam",
booktitle = "Proceedings of the 16th International Workshop on Semantic Evaluation (SemEval-2022)",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.semeval-1.211/",
doi = "10.18653/v1/2022.semeval-1.211",
pages = "1536--1542",
abstract = "We present Transformer based pretrained models, which are fine-tuned for Named Entity Recognition (NER) task. Our team participated in SemEval-2022 Task 11 MultiCoNER: Multilingual Complex Named Entity Recognition task for Hindi and Bangla. Result comparison of six models (mBERT, IndicBERT, MuRIL (Base), MuRIL (Large), XLM-RoBERTa (Base) and XLM-RoBERTa (Large) ) has been performed. It is found that among these models MuRIL (Large) model performs better for both the Hindi and Bangla languages. Its F1-Scores for Hindi and Bangla are 0.69 and 0.59 respectively."
}
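
For context, a minimal sketch of the fine-tuning setup the abstract describes, assuming the HuggingFace transformers token-classification API. The checkpoint name is the public MuRIL (Large) release; the label set and the example sentence are hypothetical illustrations, not the authors' MultiCoNER configuration.

# A minimal sketch, not the authors' code: loading MuRIL (Large) for NER
# fine-tuning via HuggingFace transformers. Label set is a hypothetical
# subset; the paper's MultiCoNER tag set and hyperparameters may differ.
from transformers import AutoTokenizer, AutoModelForTokenClassification

model_name = "google/muril-large-cased"  # MuRIL (Large), best-performing model in the paper
labels = ["O", "B-PER", "I-PER", "B-LOC", "I-LOC"]  # hypothetical tag subset

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForTokenClassification.from_pretrained(
    model_name,
    num_labels=len(labels),
    id2label=dict(enumerate(labels)),
    label2id={l: i for i, l in enumerate(labels)},
)

# Tokenize a pre-split Hindi example so subword tokens can be aligned
# back to word-level tags during training and evaluation.
enc = tokenizer(["नई", "दिल्ली"], is_split_into_words=True, return_tensors="pt")
logits = model(**enc).logits  # shape (1, seq_len, num_labels); argmax gives predicted tags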