@inproceedings{ali-etal-2025-hlu,
title = "{HLU}: Human Vs {LLM} Generated Text Detection Dataset for {U}rdu at Multiple Granularities",
author = "Ali, Iqra and
Atuhurra, Jesse and
Kamigaito, Hidetaka and
Watanabe, Taro",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.coling-main.235/",
pages = "3495--3510",
abstract = "The rise of large language models (LLMs) generating human-like text has raised concerns about misuse, especially in low-resource languages like Urdu. To address this gap, we introduce the HLU dataset, which consists of three datasets: Document, Paragraph, and Sentence level. The document-level dataset contains 1,014 instances of human-written and LLM-generated articles across 13 domains, while the paragraph and sentence-level datasets each contain 667 instances. We conducted both human and automatic evaluations. In the human evaluation, the average accuracy at the document level was 35{\%}, while at the paragraph and sentence levels, accuracies were 75.68{\%} and 88.45{\%}, respectively. For automatic evaluation, we finetuned the XLMRoBERTa model for both monolingual and multilingual settings achieving consistent results in both. Additionally, we assessed the performance of GPT4 and Claude3Opus using zero-shot prompting. Our experiments and evaluations indicate that distinguishing between human and machine-generated text is challenging for both humans and LLMs, marking a significant step in addressing this issue in Urdu."
}
Markdown (Informal)
[HLU: Human Vs LLM Generated Text Detection Dataset for Urdu at Multiple Granularities](https://aclanthology.org/2025.coling-main.235/) (Ali et al., COLING 2025)
ACL