@inproceedings{goodale-etal-2025-meta,
title = "Meta-Learning Neural Mechanisms rather than {B}ayesian Priors",
author = "Goodale, Michael Eric and
Mascarenhas, Salvador and
Lakretz, Yair",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.860/",
pages = "17588--17605",
ISBN = "979-8-89176-251-0",
abstract = "Children acquire language despite being exposed to several orders of magnitude less data than large language models require. Meta-learning has been proposed as a way to integrate human-like learning biases into neural-network architectures, combining both the structured generalizations of symbolic models with the scalability of neural-network models. But what does meta-learning exactly imbue the model with? We investigate the meta-learning of formal languages and find that, contrary to previous claims, meta-trained models are not learning simplicity-based priors when meta-trained on datasets organised around simplicity. Rather, we find evidence that meta-training imprints neural mechanisms (such as counters) into the model, which function like cognitive primitives for the network on downstream tasks. Most surprisingly, we find that meta-training on a *single* formal language can provide as much improvement to a model as meta-training on 5000 different formal languages, provided that the formal language incentivizes the learning of useful neural mechanisms. Taken together, our findings provide practical implications for efficient meta-learning paradigms and new theoretical insights into linking symbolic theories and neural mechanisms."
}