@inproceedings{prosser-edwards-2024-everything,
title = "Not Everything Is Online Grooming: False Risk Finding in Large Language Model Assessments of Human Conversations",
author = "Prosser, Ellie and
Edwards, Matthew",
editor = "Mitkov, Ruslan and
Ezzini, Saad and
Ranasinghe, Tharindu and
Ezeani, Ignatius and
Khallaf, Nouran and
Acarturk, Cengiz and
Bradbury, Matthew and
El-Haj, Mo and
Rayson, Paul",
booktitle = "Proceedings of the First International Conference on Natural Language Processing and Artificial Intelligence for Cyber Security",
month = jul,
year = "2024",
address = "Lancaster, UK",
publisher = "International Conference on Natural Language Processing and Artificial Intelligence for Cyber Security",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.nlpaics-1.24/",
pages = "220--229",
    abstract = "Large Language Models (LLMs) have rapidly been adopted by the general public, and as usage of these models becomes commonplace, they will naturally be used for increasingly human-centric tasks, including security advice and risk identification for personal situations. It is imperative that systems used in such a manner are well-calibrated. In this paper, 6 popular LLMs were evaluated for their propensity towards false or over-cautious risk finding in online interactions between real people, with a focus on the risk of online grooming, the advice generated for such contexts, and the impact of prompt specificity. Through an analysis of 3840 generated answers, it was found that models identified online grooming in even the most harmless of interactions, and that the generated advice could be harmful, judgemental, and controlling. We describe these shortcomings, and identify areas for improvement, including suggestions for future research directions."
}