@inproceedings{prickett-etal-2025-probing,
    title = {Probing Neural Network Generalization using Default Patterns},
    author = {Prickett, Brandon and
      Nyu, Tianyi and
      Pertsova, Katya},
    editor = {Nicolai, Garrett and
      Chodroff, Eleanor and
      Mailhot, Frederic and
      {\c{C}}{\"o}ltekin, {\c{C}}a{\u{g}}r{\i}},
    booktitle = {Proceedings of the 22nd {SIGMORPHON} Workshop on Computational Morphology, Phonology, and Phonetics},
    month = may,
    year = {2025},
    address = {Albuquerque, New Mexico, USA},
    publisher = {Association for Computational Linguistics},
    url = {https://aclanthology.org/2025.sigmorphon-main.4/},
    pages = {34--44},
    isbn = {979-8-89176-231-2},
    abstract = {Whether neural-net models can learn minority-default patterns has been a matter of some controversy. Results based on modeling real human language data are hard to interpret due to complexity. Therefore, we examine the learning of a simple artificial language pattern involving defaults using three computational models: an Encoder-Decoder RNN, a Transformer Encoder, and a Logistic Regression. Overall, we find that the models have the hardest time with minority defaults, but can eventually learn them and apply them to novel words (although not always extend them to completely novel segments or novel CV-sequences). Type-frequency has the largest effect on learning in all models, trumping the effect of distribution. We examine the weights of two models to provide further insights into how defaults are represented inside the models.},
}
Markdown (Informal)
[Probing Neural Network Generalization using Default Patterns](https://preview.aclanthology.org/fix-sig-urls/2025.sigmorphon-main.4/) (Prickett et al., SIGMORPHON 2025)
ACL
- Brandon Prickett, Tianyi Nyu, and Katya Pertsova. 2025. Probing Neural Network Generalization using Default Patterns. In Proceedings of the 22nd SIGMORPHON Workshop on Computational Morphology, Phonology, and Phonetics, pages 34–44, Albuquerque, New Mexico, USA. Association for Computational Linguistics.