@inproceedings{temesgen-etal-2025-extracting,
    title = "Extracting Linguistic Information from Large Language Models: Syntactic Relations and Derivational Knowledge",
    author = "Temesgen, Tsedeniya Kinfe and
      Di Marco, Marion and
      Fraser, Alexander",
    editor = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.emnlp-main.1384/",
    doi = "10.18653/v1/2025.emnlp-main.1384",
    pages = "27198--27214",
    isbn = "979-8-89176-332-6",
    abstract = "This paper presents a study of the linguistic knowledge and generalization capabilities of Large Language Models (LLMs), focusing on their morphosyntactic competence. We design three diagnostic tasks: (i) labeling syntactic information at the sentence level - identifying subjects, objects, and indirect objects; (ii) derivational decomposition at the word level - identifying morpheme boundaries and labeling the decomposed sequence; and (iii) in-depth study of morphological decomposition in German and Amharic. We evaluate prompting strategies in GPT-4o and LLaMA 3.3-70B to extract different types of linguistic structure for typologically diverse languages. Our results show that GPT-4o consistently outperforms LLaMA in all tasks; however, both models exhibit limitations and show little evidence of abstract morphological rule learning. Importantly, we show strong evidence that the models fail to learn underlying morphological structures. Therefore, raising important doubts about their ability to generalize."
}
Markdown (Informal)
[Extracting Linguistic Information from Large Language Models: Syntactic Relations and Derivational Knowledge](https://preview.aclanthology.org/ingest-luhme/2025.emnlp-main.1384/) (Temesgen et al., EMNLP 2025)
ACL