@inproceedings{bestgen-2020-last,
title = "{LAST} at {S}em{E}val-2020 Task 10: Finding Tokens to Emphasise in Short Written Texts with Precomputed Embedding Models and {L}ight{GBM}",
author = "Bestgen, Yves",
editor = "Herbelot, Aurelie and
Zhu, Xiaodan and
Palmer, Alexis and
Schneider, Nathan and
May, Jonathan and
Shutova, Ekaterina",
booktitle = "Proceedings of the Fourteenth Workshop on Semantic Evaluation",
month = dec,
year = "2020",
address = "Barcelona (online)",
publisher = "International Committee for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2020.semeval-1.218/",
doi = "10.18653/v1/2020.semeval-1.218",
pages = "1671--1677",
abstract = "To select tokens to be emphasised in short texts, a system mainly based on precomputed embedding models, such as BERT and ELMo, and LightGBM is proposed. Its performance is low. Additional analyzes suggest that its effectiveness is poor at predicting the highest emphasis scores while they are the most important for the challenge and that it is very sensitive to the specific instances provided during learning."
}
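
The abstract describes a pipeline in which precomputed contextual embeddings (BERT, ELMo) serve as token-level features for a LightGBM regressor that predicts emphasis scores. The following is a minimal sketch of that general setup, not the paper's actual configuration: the random vectors stand in for precomputed embeddings, the hyperparameters are illustrative, and the ranking step simply assumes the highest-scoring tokens are the ones to emphasise.

```python
# Hypothetical sketch: token-level emphasis regression with LightGBM,
# using stand-in vectors in place of precomputed BERT/ELMo embeddings.
import numpy as np
from lightgbm import LGBMRegressor

rng = np.random.default_rng(0)

# Stand-in for precomputed token embeddings: n_tokens x embedding_dim.
X_train = rng.normal(size=(200, 64))
# Stand-in for gold emphasis scores in [0, 1]
# (e.g. the fraction of annotators who marked the token).
y_train = rng.uniform(size=200)

# Illustrative hyperparameters, not those reported in the paper.
model = LGBMRegressor(n_estimators=100, learning_rate=0.1)
model.fit(X_train, y_train)

# Predict emphasis scores for new tokens and rank them;
# the top-scoring tokens would be the ones selected for emphasis.
X_test = rng.normal(size=(10, 64))
scores = model.predict(X_test)
top_k = np.argsort(scores)[::-1][:4]
print("Token indices ranked most emphatic:", top_k)
```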