@inproceedings{luo-etal-2025-antif,
    title = "{AntIF}:大语言模型抗干扰能力评估",
    author = "Luo, Yajing  and
      Hou, Yutao  and
      Chen, Yun  and
      Chen, Guanhua",
    editor = "Sun, Maosong  and
      Duan, Peiyong  and
      Liu, Zhiyuan  and
      Xu, Ruifeng  and
      Sun, Weiwei",
    booktitle = "Proceedings of the 24th {C}hina National Conference on Computational Linguistics ({CCL} 2025)",
    month = aug,
    year = "2025",
    address = "Jinan, China",
    publisher = "Chinese Information Processing Society of China",
    url = "https://aclanthology.org/2025.ccl-1.26/",
    pages = "335--362",
    abstract = "本文提出了一种多智能体协同的干扰数据生成框架,旨在评测分析大语言模型在复杂干扰下的鲁棒性。该框架以数学领域为起点,逐步扩展至医学、法律、科学及通用场景,构建了涵盖拼写干扰、数字干扰、类型干扰与谣言干扰四类干扰的跨领域数据集AntIF,共计近5000条数据。在此基础上,本文对主流开源语言模型进行了系统的抗干扰能力评估,并结合不同的提示工程策略与模型微调方法,深入分析了AntIF 在提升模型鲁棒性方面的实际效果。"
}
Markdown (Informal)
[AntIF:大语言模型抗干扰能力评估](https://preview.aclanthology.org/ingest-ccl/2025.ccl-1.26/) (Luo et al., CCL 2025)
ACL
- Yajing Luo, Yutao Hou, Yun Chen, and Guanhua Chen. 2025. AntIF:大语言模型抗干扰能力评估. In Proceedings of the 24th China National Conference on Computational Linguistics (CCL 2025), pages 335–362, Jinan, China. Chinese Information Processing Society of China.