@inproceedings{maronikolakis-etal-2021-identifying,
  title     = {Identifying Automatically Generated Headlines using {Transformers}},
  author    = {Maronikolakis, Antonis and
               Sch{\"u}tze, Hinrich and
               Stevenson, Mark},
  editor    = {Feldman, Anna and
               Da San Martino, Giovanni and
               Leberknight, Chris and
               Nakov, Preslav},
  booktitle = {Proceedings of the Fourth Workshop on {NLP} for Internet Freedom: Censorship, Disinformation, and Propaganda},
  month     = jun,
  year      = {2021},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.nlp4if-1.1},
  doi       = {10.18653/v1/2021.nlp4if-1.1},
  pages     = {1--6},
  abstract  = {False information spread via the internet and social media influences public opinion and user activity, while generative models enable fake content to be generated faster and more cheaply than had previously been possible. In the not so distant future, identifying fake content generated by deep learning models will play a key role in protecting users from misinformation. To this end, a dataset containing human and computer-generated headlines was created and a user study indicated that humans were only able to identify the fake headlines in 47.8{\%} of the cases. However, the most accurate automatic approach, transformers, achieved an overall accuracy of 85.7{\%}, indicating that content generated from language models can be filtered out accurately.},
}
Markdown (Informal):
[Identifying Automatically Generated Headlines using Transformers](https://aclanthology.org/2021.nlp4if-1.1) (Maronikolakis et al., NLP4IF 2021)
ACL Anthology