@inproceedings{feldhus-kopf-2025-interpreting,
title = "Interpreting Language Models Through Concept Descriptions: A Survey",
author = "Feldhus, Nils and
Kopf, Laura",
editor = "Belinkov, Yonatan and
Mueller, Aaron and
Kim, Najoung and
Mohebbi, Hosein and
Chen, Hanjie and
Arad, Dana and
Sarti, Gabriele",
booktitle = "Proceedings of the 8th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.blackboxnlp-1.8/",
pages = "149--162",
ISBN = "979-8-89176-346-3",
abstract = "Understanding the decision-making processes of neural networks is a central goal of mechanistic interpretability. In the context of Large Language Models (LLMs), this involves uncovering the underlying mechanisms and identifying the roles of individual model components such as neurons and attention heads, as well as model abstractions such as the learned sparse features extracted by Sparse Autoencoders (SAEs). A rapidly growing line of work tackles this challenge by using powerful generator models to produce open-vocabulary, natural language concept descriptions for these components. In this paper, we provide the first survey of the emerging field of concept descriptions for model components and abstractions. We chart the key methods for generating these descriptions, the evolving landscape of automated and human metrics for evaluating them, and the datasets that underpin this research. Our synthesis reveals a growing demand for more rigorous, causal evaluation. By outlining the state of the art and identifying key challenges, this survey provides a roadmap for future research toward making models more transparent."
}