@inproceedings{hu-etal-2024-bayesian,
title = "A {B}ayesian Approach to Harnessing the Power of {LLM}s in Authorship Attribution",
author = "Hu, Zhengmian and
Zheng, Tong and
Huang, Heng",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.emnlp-main.733/",
doi = "10.18653/v1/2024.emnlp-main.733",
pages = "13216--13227",
abstract = "Authorship attribution aims to identify the origin or author of a document. Traditional approaches have heavily relied on manual features and fail to capture long-range correlations, limiting their effectiveness. Recent advancements leverage text embeddings from pre-trained language models, which require significant fine-tuning on labeled data, posing challenges in data dependency and limited interpretability. Large Language Models (LLMs), with their deep reasoning capabilities and ability to maintain long-range textual associations, offer a promising alternative. This study explores the potential of pre-trained LLMs in one-shot authorship attribution, specifically utilizing Bayesian approaches and probability outputs of LLMs. Our methodology calculates the probability that a text entails previous writings of an author, reflecting a more nuanced understanding of authorship. By utilizing only pre-trained models such as Llama-3-70B, our results on the IMDb and blog datasets show an impressive 85{\%} accuracy in one-shot authorship classification across ten authors. Our findings set new baselines for one-shot authorship analysis using LLMs and expand the application scope of these models in forensic linguistics. This work also includes extensive ablation studies to validate our approach."
}
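Below is a minimal, illustrative sketch of the core idea described in the abstract, not the authors' released code: score how likely a query text is under a pre-trained causal LLM when conditioned on a known writing sample from each candidate author, then attribute the query to the author with the highest conditional log-probability. The model name (gpt2 as a small stand-in for models like Llama-3-70B), the prompt wording, and the toy samples are assumptions for illustration only.

```python
# Hedged sketch of probability-based one-shot authorship attribution with a causal LM.
# Assumptions: gpt2 as a stand-in model, a simple prompt template, and toy author samples.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "gpt2"  # stand-in; the paper uses larger pre-trained LLMs such as Llama-3-70B
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model.eval()

def conditional_log_prob(context: str, query: str) -> float:
    """Sum of log p(query tokens | context tokens) under the causal LM."""
    ctx_ids = tokenizer(context, return_tensors="pt").input_ids
    qry_ids = tokenizer(query, return_tensors="pt").input_ids
    input_ids = torch.cat([ctx_ids, qry_ids], dim=1)
    with torch.no_grad():
        logits = model(input_ids).logits
    # Log-probabilities for each position's next-token prediction.
    log_probs = torch.log_softmax(logits[0, :-1], dim=-1)
    # The logit at position i predicts the token at position i + 1, so the query
    # tokens (starting at index ctx_len) are scored by positions ctx_len - 1 onward.
    positions = range(ctx_ids.shape[1] - 1, input_ids.shape[1] - 1)
    targets = input_ids[0, ctx_ids.shape[1]:]
    return sum(log_probs[pos, tok].item() for pos, tok in zip(positions, targets))

def attribute(query: str, author_samples: dict[str, str]) -> str:
    """One-shot attribution: pick the author whose known sample best 'entails' the query."""
    scores = {
        author: conditional_log_prob(
            f"A previous text by this author:\n{sample}\n\n"
            f"Another text by the same author:\n",
            query,
        )
        for author, sample in author_samples.items()
    }
    return max(scores, key=scores.get)

# Toy usage with made-up one-shot samples.
samples = {
    "author_a": "I adore slow-burn character studies and quiet endings.",
    "author_b": "Explosions, car chases, and twist endings are what I watch for.",
}
print(attribute("The film's restrained final act stayed with me for days.", samples))
```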