@inproceedings{aloui-etal-2020-slice,
  title     = {{SLICE}: Supersense-based Lightweight Interpretable Contextual Embeddings},
  author    = {Aloui, Cindy and
               Ramisch, Carlos and
               Nasr, Alexis and
               Barque, Lucie},
  editor    = {Scott, Donia and
               Bel, Nuria and
               Zong, Chengqing},
  booktitle = {Proceedings of the 28th International Conference on Computational Linguistics},
  month     = dec,
  year      = {2020},
  address   = {Barcelona, Spain (Online)},
  publisher = {International Committee on Computational Linguistics},
  url       = {https://aclanthology.org/2020.coling-main.298/},
  doi       = {10.18653/v1/2020.coling-main.298},
  pages     = {3357--3370},
  abstract  = {Contextualised embeddings such as BERT have become de facto state-of-the-art references in many NLP applications, thanks to their impressive performances. However, their opaqueness makes it hard to interpret their behaviour. SLICE is a hybrid model that combines supersense labels with contextual embeddings. We introduce a weakly supervised method to learn interpretable embeddings from raw corpora and small lists of seed words. Our model is able to represent both a word and its context as embeddings into the same compact space, whose dimensions correspond to interpretable supersenses. We assess the model in a task of supersense tagging for French nouns. The little amount of supervision required makes it particularly well suited for low-resourced scenarios. Thanks to its interpretability, we perform linguistic analyses about the predicted supersenses in terms of input word and context representations.}
}
Markdown (Informal)
[SLICE: Supersense-based Lightweight Interpretable Contextual Embeddings](https://aclanthology.org/2020.coling-main.298/) (Aloui et al., COLING 2020)
ACL