@inproceedings{pohl-etal-2025-towards,
title = "Towards a Principled Evaluation of Knowledge Editors",
author = "Pohl, Sebastian and
Ploner, Max and
Akbik, Alan",
editor = "Jia, Robin and
Wallace, Eric and
Huang, Yangsibo and
Pimentel, Tiago and
Maini, Pratyush and
Dankers, Verna and
Wei, Johnny and
Lesci, Pietro",
booktitle = "Proceedings of the First Workshop on Large Language Model Memorization (L2M2)",
month = aug,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.l2m2-1.4/",
pages = "47--60",
ISBN = "979-8-89176-278-7",
    abstract = "Model editing has been gaining increasing attention over the past few years. For Knowledge Editing in particular, more challenging evaluation datasets have recently been released. These datasets use different methodologies to score the success of editors. Yet, it remains under-explored how robust these methodologies are and whether they unfairly favor some editors. Moreover, the disruptive impact of these editors on overall model capabilities remains a constant blind spot. We address both of these problems and show that choosing different metrics and evaluation methodologies, as well as different edit batch sizes, can lead to a different ranking of knowledge editors. Crucially, we also demonstrate this effect on general language understanding tasks evaluated alongside the knowledge editing tasks. Further, we include a manual assessment of the string-matching-based evaluation method for knowledge editing that is favored by recently released datasets, revealing a tendency to produce false positive matches."
}