@inproceedings{foo-khoo-2025-lionguard,
    title = "{LionGuard}: A Contextualized Moderation Classifier to Tackle Localized Unsafe Content",
    author = "Foo, Jessica and
      Khoo, Shaun",
    editor = "Rambow, Owen and
      Wanner, Leo and
      Apidianaki, Marianna and
      Al-Khalifa, Hend and
      Di Eugenio, Barbara and
      Schockaert, Steven and
      Darwish, Kareem and
      Agarwal, Apoorv",
    booktitle = "Proceedings of the 31st International Conference on Computational Linguistics: Industry Track",
    month = jan,
    year = "2025",
    address = "Abu Dhabi, UAE",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.coling-industry.60/",
    pages = "707--731",
    abstract = "As large language models (LLMs) become increasingly prevalent in a wide variety of applications, concerns about the safety of their outputs have become more significant. Most efforts at safety-tuning or moderation today take on a predominantly Western-centric view of safety, especially for toxic, hateful, or violent speech. In this paper, we describe LionGuard, a Singapore-contextualized moderation classifier that can serve as guardrails against unsafe LLM usage. When assessed on Singlish data, LionGuard outperforms existing widely-used moderation APIs, which are not finetuned for the Singapore context, by at least 14{\%} (binary) and up to 51{\%} (multi-label). Our work highlights the benefits of localization for moderation classifiers and presents a practical and scalable approach for low-resource languages, particularly English-based creoles."
}
Markdown (Informal)
[LionGuard: A Contextualized Moderation Classifier to Tackle Localized Unsafe Content](https://aclanthology.org/2025.coling-industry.60/) (Foo & Khoo, COLING 2025)
ACL