@inproceedings{trivedi-etal-2026-much,
  title     = "``So, How Much Do {LLMs} Hallucinate on Low-Resource Languages?'' A Quantitative and Qualitative Analysis",
  author    = "Trivedi, Kushal and
               Shaikh, Murtuza and
               Sharma, Sriyansh",
  editor    = "Hettiarachchi, Hansi and
               Ranasinghe, Tharindu and
               Plum, Alistair and
               Rayson, Paul and
               Mitkov, Ruslan and
               Gaber, Mohamed and
               Premasiri, Damith and
               Tan, Fiona Anting and
               Uyangodage, Lasitha",
  booktitle = "Proceedings of the Second Workshop on Language Models for Low-Resource Languages ({LoResLM} 2026)",
  month     = mar,
  year      = "2026",
  address   = "Rabat, Morocco",
  publisher = "Association for Computational Linguistics",
  url       = "https://preview.aclanthology.org/manual-author-scripts/2026.loreslm-1.24/",
  pages     = "271--287",
  isbn      = "979-8-89176-377-7",
  abstract  = "Language models have recently gained significant attention in natural language processing, showing strong performance across a wide range of tasks such as text classification, text generation, language modeling, and question answering (QA). Despite these advances, one of the most critical challenges faced by language models is hallucination {---} the generation of fluent and plausible responses that are factually incorrect or fabricated. This study presents preliminary work on analyzing hallucinations in QA tasks for low-resource languages. We evaluate model performance on the Mpox-Myanmar and SynDARin datasets using three API-accessible models: LLaMA 3.1 70B, LLaMA 3.1 8B, and Gemini 2.5 {---} and two monolingual language models: HyGPT 10B for Armenian and SeaLLM for Burmese. Our work contributes by systematically examining hallucinations through quantitative analysis using Natural Language Inference and Semantic Similarity metrics across different model sizes and prompting strategies, as well as qualitative analysis through human verification. We further investigate whether common assumptions about model behavior hold consistently and provide explanations for the observed patterns."
}
Markdown (Informal)
["So, How Much Do LLMs Hallucinate on Low-Resource Languages?" A Quantitative and Qualitative Analysis](https://preview.aclanthology.org/manual-author-scripts/2026.loreslm-1.24/) (Trivedi et al., LoResLM 2026)
ACL