@inproceedings{stormer-etal-2023-sam,
title = "{S}am Miller at {S}em{E}val-2023 Task 5: Classification and Type-specific Spoiler Extraction Using {XLNET} and Other Transformer Models",
author = {St{\"o}rmer, Pia and
Esser, Tobias and
Thomasius, Patrick},
editor = {Ojha, Atul Kr. and
Do{\u{g}}ru{\"o}z, A. Seza and
Da San Martino, Giovanni and
Tayyar Madabushi, Harish and
Kumar, Ritesh and
Sartori, Elisa},
booktitle = "Proceedings of the 17th International Workshop on Semantic Evaluation (SemEval-2023)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.semeval-1.169/",
doi = "10.18653/v1/2023.semeval-1.169",
pages = "1217--1224",
abstract = "This paper proposes an approach to classify andan approach to generate spoilers for clickbaitarticles and posts. For the spoiler classification,XLNET was trained to fine-tune a model. Withan accuracy of 0.66, 2 out of 3 spoilers arepredicted accurately. The spoiler generationapproach involves preprocessing the clickbaittext and post-processing the output to fit thespoiler type. The approach is evaluated on atest dataset of 1000 posts, with the best resultfor spoiler generation achieved by fine-tuninga RoBERTa Large model with a small learningrate and sample size, reaching a BLEU scoreof 0.311. The paper provides an overview ofthe models and techniques used and discussesthe experimental setup."
}
Markdown (Informal)
[Sam Miller at SemEval-2023 Task 5: Classification and Type-specific Spoiler Extraction Using XLNET and Other Transformer Models](https://preview.aclanthology.org/fix-sig-urls/2023.semeval-1.169/) (Störmer et al., SemEval 2023)
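The paper does not ship code here; as a rough illustration of the classification component described in the abstract, below is a minimal sketch of fine-tuning XLNet for spoiler-type classification with Hugging Face Transformers. The toy dataset, the three-way label set (phrase/passage/multipart, following the SemEval-2023 Task 5 setup), and all hyperparameters are illustrative assumptions, not the authors' configuration.

```python
# Minimal sketch (not from the paper): fine-tuning XLNet for spoiler-type
# classification with Hugging Face Transformers. Dataset, labels, and
# hyperparameters are illustrative assumptions only.
from datasets import Dataset
from transformers import (
    XLNetTokenizerFast,
    XLNetForSequenceClassification,
    TrainingArguments,
    Trainer,
)

# Toy clickbait posts with spoiler-type labels (0=phrase, 1=passage, 2=multipart).
data = {
    "text": [
        "You won't believe what this actor said on set...",
        "Ten reasons your coffee habit is ruining your mornings",
    ],
    "label": [0, 2],
}
dataset = Dataset.from_dict(data)

tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
model = XLNetForSequenceClassification.from_pretrained(
    "xlnet-base-cased", num_labels=3
)

def tokenize(batch):
    # Tokenize the clickbait post text for XLNet.
    return tokenizer(
        batch["text"], truncation=True, padding="max_length", max_length=128
    )

dataset = dataset.map(tokenize, batched=True)

args = TrainingArguments(
    output_dir="xlnet-spoiler-clf",
    per_device_train_batch_size=8,
    learning_rate=2e-5,   # small learning rate, in the spirit of the abstract
    num_train_epochs=3,
)

trainer = Trainer(model=model, args=args, train_dataset=dataset)
trainer.train()
```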