@inproceedings{wang-etal-2025-overview,
    title     = {Overview of {CCL}25-Eval Task 7: {C}hinese Literary Language Understanding Evaluation ({Z}heng{M}ing)},
    author    = {Wang, Kang and
                 Wang, Qing and
                 Peng, Min and
                 Yue, Kun and
                 Hu, Gang},
    editor    = {Lin, Hongfei and
                 Li, Bin and
                 Tan, Hongye},
    booktitle = {Proceedings of the 24th {C}hina National Conference on Computational Linguistics ({CCL} 2025)},
    month     = aug,
    year      = {2025},
    address   = {Jinan, China},
    publisher = {Chinese Information Processing Society of China},
    url       = {https://preview.aclanthology.org/ingest-ccl/2025.ccl-2.34/},
    pages     = {288--297},
    abstract  = {The 24th Chinese Computational Linguistics Conference (CCL25-Eval) features 12 technical evaluation tasks. Among them, Task 7 is the Chinese Literary Language Understanding Evaluation (ZhengMing). ZhengMing is a universal and scalable evaluation framework designed to assess natural language processing (NLP) tasks in the literary domain, such as text classification, text generation, automated question answering, relation extraction, and machine translation. ZhengMing framework aims to evaluate the performance of large language models (LLMs) in the literary field at a fine-grained level. In this mission, 89 teams signed up for the competition, with 5 teams ultimately submitting results. The highest score achieved is 0.65. This paper presents and discusses the dataset, task descriptions, competition results, and other relevant information for this evaluation task. This paper introduces and presents relevant information about this evaluation task, including the dataset, task description, and competition results. More details are available at https://github.com/isShayulajiao/CCL25-Eval-ZhengMing.},
}
@comment{Citation-widget residue from the ACL Anthology page, kept for reference:
Markdown (Informal)
[Overview of CCL25-Eval Task 7: Chinese Literary Language Understanding Evaluation (ZhengMing)](https://preview.aclanthology.org/ingest-ccl/2025.ccl-2.34/) (Wang et al., CCL 2025)
ACL
}