@inproceedings{su-etal-2025-system,
  title     = {System Report for {CCL}25-Eval Task 7: A Two-stage Framework for Aligning {LLM} to {C}hinese Literature via Fine-Tuning and Prompting},
  author    = {Su, Fan and
               Qin, Yiming and
               Zhao, Aijia and
               Wang, Zhenxu and
               Huang, Zekang},
  editor    = {Lin, Hongfei and
               Li, Bin and
               Tan, Hongye},
  booktitle = {Proceedings of the 24th {C}hina National Conference on Computational Linguistics ({CCL} 2025)},
  month     = aug,
  year      = {2025},
  address   = {Jinan, China},
  publisher = {Chinese Information Processing Society of China},
  url       = {https://preview.aclanthology.org/ingest-ccl/2025.ccl-2.33/},
  pages     = {278--287},
  abstract  = {This system report presents our approach and results for the First Chinese Literary Language Understanding Evaluation (ZhengMing) task at CCL25-Eval. The ZhengMing evaluation benchmark consists of seven subtasks: Biases in Modern Literary Criticism, Modern Literary Criticism Mining, Classical Chinese Literature Comprehension, Literary Reading Comprehension, Literary Named Entity Recognition, Literary Language Style Transfer, and Literary Work Style Prediction. To address these tasks, we propose a two-stage framework named StageAli to align large language models (LLMs) to the Chinese literature domain. In the first stage, we employ Low-Rank Adaptation (LoRA) to fine-tune an LLM on Chinese literary datasets, aiming to adapt the model to Chinese literature domain. In the second stage, we utilize a combination of prompting strategies to further unleash the potential of the fine-tuned model in addressing the Chinese Literary Language Understanding task. Our proposed StageAli framework achieves second place in the overall evaluation, demonstrating the effectiveness of our method.},
}
@comment{Markdown (Informal)}
@comment{[System Report for CCL25-Eval Task 7: A Two-stage Framework for Aligning LLM to Chinese Literature via Fine-Tuning and Prompting](https://preview.aclanthology.org/ingest-ccl/2025.ccl-2.33/) (Su et al., CCL 2025) ACL}