% NOTE(review): replaced the staging URL (preview.aclanthology.org/remove-affiliations/...)
% with the canonical permanent ACL Anthology URL for this paper ID.
@inproceedings{das-srihari-2024-improving,
title = "Improving Dialog Safety using Socially Aware Contrastive Learning",
author = "Das, Souvik and
Srihari, Rohini K.",
editor = "Graham, Yvette and
Liu, Qun and
Lampouras, Gerasimos and
Iacobacci, Ignacio and
Madden, Sinead and
Khalid, Haider and
Qureshi, Rameez",
booktitle = "Proceedings of the 1st Workshop on Simulating Conversational Intelligence in Chat (SCI-CHAT 2024)",
month = mar,
year = "2024",
address = "St. Julians, Malta",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.scichat-1.2/",
pages = "4--18",
abstract = "State-of-the-art conversational AI systems raise concerns due to their potential risks of generating unsafe, toxic, unethical, or dangerous content. Previous works have developed datasets to teach conversational agents the appropriate social paradigms to respond effectively to specifically designed hazardous content. However, models trained on these adversarial datasets still struggle to recognize subtle unsafe situations that appear naturally in conversations or introduce an inappropriate response in a casual context. To understand the extent of this problem, we study prosociality in both adversarial and casual dialog contexts and audit the response quality of general-purpose language models in terms of propensity to produce unsafe content. We propose a dual-step fine-tuning process to address these issues using a socially aware n-pair contrastive loss. Subsequently, we train a base model that integrates prosocial behavior by leveraging datasets like Moral Integrity Corpus (MIC) and ProsocialDialog. Experimental results on several dialog datasets demonstrate the effectiveness of our approach in generating socially appropriate responses."
}
Markdown (Informal)
[Improving Dialog Safety using Socially Aware Contrastive Learning](https://aclanthology.org/2024.scichat-1.2/) (Das & Srihari, SCI-CHAT 2024)
ACL