@inproceedings{zahid-etal-2025-bedaa,
    title = "{BEDAA}: {Bayesian} Enhanced {DeBERTa} for Uncertainty-Aware Authorship Attribution",
    author = "Zahid, Iqra and
      Sun, Youcheng and
      Batista-Navarro, Riza",
    editor = "Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/display_plenaries/2025.findings-acl.924/",
    pages = "17952--17966",
    isbn = "979-8-89176-256-5",
    abstract = "Authorship Attribution (AA) seeks to identify the author of a given text, yet existing methods often struggle with trustworthiness and interpretability, particularly across different domains, languages, and stylistic variations. These challenges arise from the absence of uncertainty quantification and the inability of current models to adapt to diverse authorship tasks. To address these limitations, we introduce BEDAA, a Bayesian-Enhanced DeBERTa framework that integrates Bayesian reasoning with transformer-based language models to enable uncertainty-aware and interpretable authorship attribution. BEDAA achieves up to 19.69{\%} improvement in F1-score across multiple authorship attribution tasks, including binary, multiclass, and dynamic authorship detection. By incorporating confidence ranking, uncertainty decomposition, and probabilistic reasoning, BEDAA improves robustness while offering transparent decision-making processes. Furthermore, BEDAA extends beyond traditional AA by demonstrating its effectiveness in human vs. machine-generated text classification, code authorship detection, and cross-lingual attribution. These advances establish BEDAA as a generalised, interpretable, and adaptable framework for modern authorship attribution challenges."
}
Markdown (Informal)
[BEDAA: Bayesian Enhanced DeBERTa for Uncertainty-Aware Authorship Attribution](https://preview.aclanthology.org/display_plenaries/2025.findings-acl.924/) (Zahid et al., Findings 2025)
ACL