@inproceedings{lewis-white-2023-mitigating,
  title     = {Mitigating Harms of {LLM}s via Knowledge Distillation for a Virtual Museum Tour Guide},
  author    = {Lewis, Ashley and
               White, Michael},
  editor    = {Hazarika, Devamanyu and
               Tang, Xiangru Robert and
               Jin, Di},
  booktitle = {Proceedings of the 1st Workshop on Taming Large Language Models: Controllability in the era of Interactive Assistants!},
  month     = sep,
  year      = {2023},
  address   = {Prague, Czech Republic},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.tllm-1.4/},
  pages     = {31--45},
  abstract  = {LLMs are known to be very powerful, exhibiting both great benefits and great risk. We seek to leverage the benefits, in particular the ability to be fluent, conversational dialogue agents, while minimizing the risks, such as hallucination and toxic content. In this work we use knowledge distillation to create a virtual museum tour guide dialogue agent, employing ChatGPT as a teacher model for a smaller student model, T5-large. We find the T5 model shows competitive performance, significantly reduces instances of hallucination, and shows promise for reducing toxic content.}
}
@comment{
Markdown (Informal):
[Mitigating Harms of LLMs via Knowledge Distillation for a Virtual Museum Tour Guide](https://aclanthology.org/2023.tllm-1.4/) (Lewis & White, TLLM 2023)
Source: ACL Anthology
}