@inproceedings{bestgen-2020-last,
title = "{LAST} at {S}em{E}val-2020 Task 10: Finding Tokens to Emphasise in Short Written Texts with Precomputed Embedding Models and {L}ight{GBM}",
author = "Bestgen, Yves",
booktitle = "Proceedings of the Fourteenth Workshop on Semantic Evaluation",
month = dec,
year = "2020",
address = "Barcelona (online)",
publisher = "International Committee for Computational Linguistics",
url = "https://aclanthology.org/2020.semeval-1.218",
doi = "10.18653/v1/2020.semeval-1.218",
pages = "1671--1677",
    abstract = "To select tokens to be emphasised in short texts, a system mainly based on precomputed embedding models, such as BERT and ELMo, and LightGBM is proposed. Its performance is low. Additional analyses suggest that its effectiveness is poor at predicting the highest emphasis scores while they are the most important for the challenge and that it is very sensitive to the specific instances provided during learning.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="bestgen-2020-last">
<titleInfo>
<title>LAST at SemEval-2020 Task 10: Finding Tokens to Emphasise in Short Written Texts with Precomputed Embedding Models and LightGBM</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yves</namePart>
<namePart type="family">Bestgen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
            <dateIssued>2020-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fourteenth Workshop on Semantic Evaluation</title>
</titleInfo>
<originInfo>
<publisher>International Committee for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Barcelona (online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
        <abstract>To select tokens to be emphasised in short texts, a system mainly based on precomputed embedding models, such as BERT and ELMo, and LightGBM is proposed. Its performance is low. Additional analyses suggest that its effectiveness is poor at predicting the highest emphasis scores while they are the most important for the challenge and that it is very sensitive to the specific instances provided during learning.</abstract>
<identifier type="citekey">bestgen-2020-last</identifier>
<identifier type="doi">10.18653/v1/2020.semeval-1.218</identifier>
<location>
<url>https://aclanthology.org/2020.semeval-1.218</url>
</location>
<part>
            <date>2020-12</date>
<extent unit="page">
<start>1671</start>
<end>1677</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T LAST at SemEval-2020 Task 10: Finding Tokens to Emphasise in Short Written Texts with Precomputed Embedding Models and LightGBM
%A Bestgen, Yves
%S Proceedings of the Fourteenth Workshop on Semantic Evaluation
%D 2020
%8 dec
%I International Committee for Computational Linguistics
%C Barcelona (online)
%F bestgen-2020-last
%X To select tokens to be emphasised in short texts, a system mainly based on precomputed embedding models, such as BERT and ELMo, and LightGBM is proposed. Its performance is low. Additional analyses suggest that its effectiveness is poor at predicting the highest emphasis scores while they are the most important for the challenge and that it is very sensitive to the specific instances provided during learning.
%R 10.18653/v1/2020.semeval-1.218
%U https://aclanthology.org/2020.semeval-1.218
%U https://doi.org/10.18653/v1/2020.semeval-1.218
%P 1671-1677
Markdown (Informal)
[LAST at SemEval-2020 Task 10: Finding Tokens to Emphasise in Short Written Texts with Precomputed Embedding Models and LightGBM](https://aclanthology.org/2020.semeval-1.218) (Bestgen, SemEval 2020)
ACL