@inproceedings{christoph-etal-2025-data,
title = "From Data to Knowledge: Evaluating How Efficiently Language Models Learn Facts",
author = "Christoph, Daniel and
Ploner, Max and
Haller, Patrick and
Akbik, Alan",
editor = "Jia, Robin and
Wallace, Eric and
Huang, Yangsibo and
Pimentel, Tiago and
Maini, Pratyush and
Dankers, Verna and
Wei, Johnny and
Lesci, Pietro",
booktitle = "Proceedings of the First Workshop on Large Language Model Memorization (L2M2)",
month = aug,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.l2m2-1.3/",
pages = "29--46",
ISBN = "979-8-89176-278-7",
abstract = "Sample efficiency is a crucial property of language models with practical implications for training efficiency. In real-world text, information follows a long-tailed distribution. Yet, we expect models to learn and recall frequent and infrequent facts. Sample efficient models are better equipped to handle this challenge of learning and retaining rare information without requiring excessive exposure. This study analyzes multiple models of varying architectures and sizes, all trained on the same pre-training data. By annotating relational facts with their frequencies in the training corpus, we examine how model performance varies with fact frequency. Our findings show that most models perform similarly on high-frequency facts but differ notably on low-frequency facts. This analysis provides new insights into the relationship between model architecture, size, and factual learning efficiency."
}