@inproceedings{dubois-etal-2025-mosaic-multiple,
title = "{MOSAIC}: Multiple Observers Spotting {AI} Content",
author = "Dubois, Matthieu and
Yvon, Fran{\c{c}}ois and
Piantanida, Pablo",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/display_plenaries/2025.findings-acl.1244/",
pages = "24230--24247",
ISBN = "979-8-89176-256-5",
abstract = "The dissemination of Large Language Models (LLMs), trained at scale, and endowed with powerful text-generating abilities, has made it easier for all to produce harmful, toxic, faked or forged content. In response, various proposals have been made to automatically discriminate artificially generated from human-written texts, typically framing the problem as a binary classification problem. Early approaches evaluate an input document with a well-chosen detector LLM, assuming that low-perplexity scores reliably signal machine-made content. More recent systems instead consider two LLMs and compare their probability distributions over the document to further discriminate when perplexity alone cannot. However, using a fixed pair of models can induce brittleness in performance. We extend these approaches to the ensembling of several LLMs and derive a new, theoretically grounded approach to combine their respective strengths. Our experiments, using a variety of generator LLMs, suggest that this approach effectively harnesses each model{'}s capabilities, leading to strong detection performance on a variety of domains."
}
Markdown (Informal)
[MOSAIC: Multiple Observers Spotting AI Content](https://preview.aclanthology.org/display_plenaries/2025.findings-acl.1244/) (Dubois et al., Findings 2025)

ACL
Matthieu Dubois, François Yvon, and Pablo Piantanida. 2025. MOSAIC: Multiple Observers Spotting AI Content. In Findings of the Association for Computational Linguistics: ACL 2025, pages 24230–24247, Vienna, Austria. Association for Computational Linguistics.
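
For context, the sketch below illustrates the perplexity-based detection idea summarized in the abstract: several observer LLMs each score a document's average per-token negative log-likelihood, the scores are combined, and unusually low perplexity is taken as a sign of machine-generated text. This is a hypothetical illustration, not MOSAIC itself; the observer model names, the decision threshold, and the plain averaging rule are assumptions, whereas the paper derives a theoretically grounded way to combine the observers.

```python
# Minimal, hypothetical sketch of perplexity-based AI-text detection with an
# ensemble of observer LLMs (not the MOSAIC combination rule from the paper).
# Assumes the HuggingFace `transformers` and `torch` packages are installed.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

SCORERS = ["gpt2", "distilgpt2"]  # illustrative observer models, not those used in the paper


def mean_nll(text: str, model, tokenizer) -> float:
    """Average per-token negative log-likelihood of `text` under `model`."""
    ids = tokenizer(text, return_tensors="pt").input_ids
    with torch.no_grad():
        out = model(ids, labels=ids)  # causal-LM loss = mean token NLL
    return out.loss.item()


def ensemble_score(text: str) -> float:
    """Average the per-model NLLs; lower values suggest machine-written text."""
    scores = []
    for name in SCORERS:
        tokenizer = AutoTokenizer.from_pretrained(name)
        model = AutoModelForCausalLM.from_pretrained(name).eval()
        scores.append(mean_nll(text, model, tokenizer))
    return sum(scores) / len(scores)


if __name__ == "__main__":
    sample = "The quick brown fox jumps over the lazy dog."
    threshold = 3.5  # hypothetical threshold; in practice tuned on held-out data
    score = ensemble_score(sample)
    label = "AI-like" if score < threshold else "human-like"
    print(f"mean NLL = {score:.2f} -> {label}")
```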