@inproceedings{wen-etal-2024-ji,
title = "基于领域信息分解式学习的大语言模型修辞认知增强方法(Method for Enhancing Rhetorical Cognition of Large Language Models Based on Decomposed Learning of Field Information)",
author = "Wen, Wang and
Dong, Yu and
Pengyuan, Liu",
editor = "Sun, Maosong and
Liang, Jiye and
Han, Xianpei and
Liu, Zhiyuan and
He, Yulan",
booktitle = "Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 1: Main Conference)",
month = jul,
year = "2024",
address = "Taiyuan, China",
publisher = "Chinese Information Processing Society of China",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2024.ccl-1.70/",
pages = "894--909",
language = "zho",
    abstract = "Chinese rhetorical devices are diverse and conceptually heterogeneous, and large language models show deficiencies in their cognition of some of these devices. To address this problem, this paper studies how to enhance the rhetorical cognition of large language models and investigates its relationship to rhetorical recognition performance. To this end, we propose the QAKAG framework, which first introduces the idea of decomposed information learning, detecting a model's rhetorical cognition deficiencies through question answering, and then explores the optimal information-supplementation mechanism across four different knowledge-combination strategies, thereby enhancing the model's rhetorical cognition. We construct the multi-category Chinese rhetorical sentence dataset MCRSD and the rhetorical knowledge base MCRKB, and conduct experiments on six large language models, including ChatGPT4, verifying the effectiveness of the QAKAG framework for enhancing rhetorical cognition and the necessity of each of its stages. The results show that, under QAKAG, the six models improve their average F1 on the multi-category rhetorical recognition task by 22.1{\%} over answering recognition questions directly, outperforming the Zero-shot-CoT, RAG-BaiKe, and Few-Shot5 prompting strategies."
}