@inproceedings{lichengfei-etal-2025-system,
title = "System Report for {CCL}25-Eval Task 5: Data Augmentation and Large Language Model Fine Tuning for {C}hinese Ancient Poetry Comprehension and Inference",
author = "Lichengfei, Lichengfei and
Wang, Chunyu and
Li, Hanlin and
Zhang, Wenya",
editor = "Lin, Hongfei and
Li, Bin and
Tan, Hongye",
booktitle = "Proceedings of the 24th {C}hina National Conference on Computational Linguistics ({CCL} 2025)",
month = aug,
year = "2025",
address = "Jinan, China",
publisher = "Chinese Information Processing Society of China",
url = "https://preview.aclanthology.org/ingest-ccl/2025.ccl-2.21/",
pages = "181--186",
abstract = "This paper introduces the CCL25-Eval evaluation task for ancient poetry comprehension and inference, which aims to enhance the capabilities of large language models (LLMs) in processing context-dependent texts with strong cultural backgrounds. Addressing the dual challenges of semantic analysis and emotional inference in ancient poetry, we propose a solution that integrates Qwen-series LLMs with systematic data augmentation and LoRA-based parameter-efficient fine-tuning. We construct a high-quality dataset and design multi-phase training and inference strategies. For emotional inference tasks in particular, we explore two approaches: emotion lexicon-based indirect matching and emotion appreciation-based direct judgment of emotion lexicon options. Experimental results indicate that: 1) data augmentation significantly improves the model{'}s overall performance; 2) the emotion appreciation-based direct judgment approach achieves an accuracy of 0.865, ranking first in Task A; 3) attempts with Qwen3 and reinforcement learning do not significantly improve Task B results but demonstrate good performance in sentence semantic similarity scores and format stability."
}