@inproceedings{kumar-etal-2025-extracting,
  title     = {Extracting Conceptual Spaces from {LLM}s Using Prototype Embeddings},
  author    = {Kumar, Nitesh and
               Chatterjee, Usashi and
               Schockaert, Steven},
  editor    = {Christodoulopoulos, Christos and
               Chakraborty, Tanmoy and
               Rose, Carolyn and
               Peng, Violet},
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2025},
  month     = nov,
  year      = {2025},
  address   = {Suzhou, China},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.findings-emnlp.493/},
  doi       = {10.18653/v1/2025.findings-emnlp.493},
  pages     = {9275--9298},
  isbn      = {979-8-89176-335-7},
  abstract  = {Conceptual spaces represent entities and concepts using cognitively meaningful dimensions, typically referring to perceptual features. Such representations are widely used in cognitive science and have the potential to serve as a cornerstone for explainable AI. Unfortunately, they have proven notoriously difficult to learn, although recent LLMs appear to capture the required perceptual features to a remarkable extent. Nonetheless, practical methods for extracting the corresponding conceptual spaces are currently still lacking. While various methods exist for extracting embeddings from LLMs, extracting conceptual spaces also requires us to encode the underlying features. In this paper, we propose a strategy in which features (e.g. sweetness) are encoded by embedding the description of a corresponding prototype (e.g. a very sweet food). To improve this strategy, we fine-tune the LLM to align the prototype embeddings with the corresponding conceptual space dimensions. Our empirical analysis finds this approach to be highly effective.},
}
Markdown (Informal)
[Extracting Conceptual Spaces from LLMs Using Prototype Embeddings](https://aclanthology.org/2025.findings-emnlp.493/) (Kumar et al., Findings 2025)
ACL