@inproceedings{shin-etal-2020-ids,
title = "{IDS} at {S}em{E}val-2020 Task 10: Does Pre-trained Language Model Know What to Emphasize?",
author = "Shin, Jaeyoul and
Kim, Taeuk and
Lee, Sang-goo",
editor = "Herbelot, Aurelie and
Zhu, Xiaodan and
Palmer, Alexis and
Schneider, Nathan and
May, Jonathan and
Shutova, Ekaterina",
booktitle = "Proceedings of the Fourteenth Workshop on Semantic Evaluation",
month = dec,
year = "2020",
address = "Barcelona (online)",
publisher = "International Committee for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2020.semeval-1.185/",
doi = "10.18653/v1/2020.semeval-1.185",
pages = "1371--1376",
abstract = "We propose a novel method that enables us to determine words that deserve to be emphasized from written text in visual media, relying only on the information from the self-attention distributions of pre-trained language models (PLMs). With extensive experiments and analyses, we show that 1) our zero-shot approach is superior to a reasonable baseline that adopts TF-IDF and that 2) there exist several attention heads in PLMs specialized for emphasis selection, confirming that PLMs are capable of recognizing important words in sentences."
}
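
The abstract describes a zero-shot procedure: score words by the self-attention they attract inside a pre-trained language model and emphasize the top-ranked ones. Below is a minimal, hypothetical Python sketch of that idea using HuggingFace Transformers and bert-base-uncased; the layer and head indices, the "attention received" aggregation, and the WordPiece merging are illustrative assumptions, not the authors' exact method.

    # Hypothetical sketch of zero-shot emphasis scoring via PLM self-attention,
    # loosely following the abstract. Model, layer, head, and aggregation choice
    # are assumptions for illustration only.
    import torch
    from transformers import AutoModel, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = AutoModel.from_pretrained("bert-base-uncased", output_attentions=True)
    model.eval()

    def emphasis_scores(sentence: str, layer: int = 8, head: int = 7):
        """Rank words by the attention mass they receive from one head."""
        enc = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            out = model(**enc)
        # out.attentions: tuple of (batch, heads, seq, seq) tensors, one per layer
        attn = out.attentions[layer][0, head]   # (seq, seq) for the chosen head
        received = attn.sum(dim=0)              # attention received per token
        tokens = tokenizer.convert_ids_to_tokens(enc["input_ids"][0])
        # Merge WordPiece scores back onto whole words, skipping special tokens
        words, scores = [], []
        for tok, score in zip(tokens, received.tolist()):
            if tok in ("[CLS]", "[SEP]"):
                continue
            if tok.startswith("##") and words:
                words[-1] += tok[2:]
                scores[-1] += score
            else:
                words.append(tok)
                scores.append(score)
        return sorted(zip(words, scores), key=lambda p: -p[1])

    print(emphasis_scores("never give up on your dreams"))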