@inproceedings{someya-etal-2025-derivational,
  title     = {{Derivational Probing}: Unveiling the Layer-wise Derivation of Syntactic Structures in Neural Language Models},
  author    = {Someya, Taiga and
               Yoshida, Ryo and
               Yanaka, Hitomi and
               Oseki, Yohei},
  editor    = {Boleda, Gemma and
               Roth, Michael},
  booktitle = {Proceedings of the 29th Conference on Computational Natural Language Learning},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.conll-1.7/},
  pages     = {93--104},
  isbn      = {979-8-89176-271-8},
  abstract  = {Recent work has demonstrated that neural language models encode syntactic structures in their internal \emph{representations}, yet the \emph{derivations} by which these structures are constructed across layers remain poorly understood. In this paper, we propose \emph{Derivational Probing} to investigate how micro-syntactic structures (e.g., subject noun phrases) and macro-syntactic structures (e.g., the relationship between the root verbs and their direct dependents) are constructed as word embeddings propagate upward across layers. Our experiments on BERT reveal a clear bottom-up derivation: micro-syntactic structures emerge in lower layers and are gradually integrated into a coherent macro-syntactic structure in higher layers. Furthermore, a targeted evaluation on subject-verb number agreement shows that the timing of constructing macro-syntactic structures is critical for downstream performance, suggesting an optimal timing for integrating global syntactic information.},
}
@comment{Markdown (Informal):
[Derivational Probing: Unveiling the Layer-wise Derivation of Syntactic Structures in Neural Language Models](https://aclanthology.org/2025.conll-1.7/) (Someya et al., CoNLL 2025)
ACL}