@inproceedings{zhiheng-etal-2023-safety,
title = "Safety and Ethical Concerns of Large Language Models",
author = "Zhiheng, Xi and
Rui, Zheng and
Tao, Gui",
editor = "Sun, Maosong and
Qin, Bing and
Qiu, Xipeng and
Jiang, Jing and
Han, Xianpei",
booktitle = "Proceedings of the 22nd Chinese National Conference on Computational Linguistics (Volume 4: Tutorial Abstracts)",
month = aug,
year = "2023",
address = "Harbin, China",
publisher = "Chinese Information Processing Society of China",
url = "https://aclanthology.org/2023.ccl-4.2",
pages = "9--16",
    abstract = "Recent months have witnessed significant progress in the field of large language models (LLMs). Represented by ChatGPT and GPT-4, LLMs perform well in various natural language processing tasks and have been applied to many downstream applications to facilitate people{'}s lives. However, there still exist safety and ethical concerns. Specifically, LLMs suffer from social bias, robustness problems, and poisoning issues, all of which may induce LLMs to spew harmful contents. We propose this tutorial as a gentle introduction to the safety and ethical issues of LLMs.",
language = "English",
}
Markdown (Informal)
[Safety and Ethical Concerns of Large Language Models](https://aclanthology.org/2023.ccl-4.2) (Zhiheng et al., CCL 2023)
ACL
Xi Zhiheng, Zheng Rui, and Gui Tao. 2023. Safety and Ethical Concerns of Large Language Models. In Proceedings of the 22nd Chinese National Conference on Computational Linguistics (Volume 4: Tutorial Abstracts), pages 9–16, Harbin, China. Chinese Information Processing Society of China.