@inproceedings{benlahbib-etal-2021-lisac,
title = "{LISAC} {FSDM} {USMBA} at {S}em{E}val-2021 Task 5: Tackling Toxic Spans Detection Challenge with Supervised {S}pan{BERT}-based Model and Unsupervised {LIME}-based Model",
author = "Benlahbib, Abdessamad and
Alami, Ahmed and
Alami, Hamza",
editor = "Palmer, Alexis and
Schneider, Nathan and
Schluter, Natalie and
Emerson, Guy and
Herbelot, Aurelie and
Zhu, Xiaodan",
booktitle = "Proceedings of the 15th International Workshop on Semantic Evaluation (SemEval-2021)",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.semeval-1.116/",
doi = "10.18653/v1/2021.semeval-1.116",
pages = "865--869",
    abstract = "Toxic spans detection is an emerging challenge that aims to find toxic spans within a toxic text. In this paper, we describe our solutions to tackle toxic spans detection. The first solution follows a supervised approach and is based on the SpanBERT model, which is designed to better represent and predict spans of text. The second solution adopts an unsupervised approach that combines a linear support vector machine with Local Interpretable Model-Agnostic Explanations (LIME), a technique used to interpret the predictions of learning-based models. Our supervised model outperformed the unsupervised one and achieved an F1-score of 67.84{\%} (ranked 22/85) in SemEval-2021 Task 5: Toxic Spans Detection."
}