@inproceedings{meaney-etal-2020-smash,
    title = "Smash at {SemEval}-2020 Task 7: Optimizing the Hyperparameters of {ERNIE} 2.0 for Humor Ranking and Rating",
    author = "Meaney, J. A. and
      Wilson, Steven and
      Magdy, Walid",
    editor = "Herbelot, Aurelie and
      Zhu, Xiaodan and
      Palmer, Alexis and
      Schneider, Nathan and
      May, Jonathan and
      Shutova, Ekaterina",
    booktitle = "Proceedings of the Fourteenth Workshop on Semantic Evaluation",
    month = dec,
    year = "2020",
    address = "Barcelona (online)",
    publisher = "International Committee for Computational Linguistics",
    url = "https://aclanthology.org/2020.semeval-1.137/",
    doi = "10.18653/v1/2020.semeval-1.137",
    pages = "1049--1054",
    abstract = "The use of pre-trained language models such as BERT and ULMFiT has become increasingly popular in shared tasks, due to their powerful language modelling capabilities. Our entry to SemEval uses ERNIE 2.0, a language model which is pre-trained on a large number of tasks to enrich the semantic and syntactic information learned. ERNIE's knowledge masking pre-training task is a unique method for learning about named entities, and we hypothesise that it may be of use in a dataset which is built on news headlines and which contains many named entities. We optimize the hyperparameters in a regression and classification model and find that the hyperparameters we selected helped to make bigger gains in the classification model than the regression model."
}
Markdown (Informal)
[Smash at SemEval-2020 Task 7: Optimizing the Hyperparameters of ERNIE 2.0 for Humor Ranking and Rating](https://aclanthology.org/2020.semeval-1.137/) (Meaney et al., SemEval 2020)
ACL