@inproceedings{han-etal-2026-sage,
    title = {{SAGE}: An Agentic Explainer Framework for Interpreting {SAE} Features in Language Models},
    author = {Han, Jiaojiao and
      Xu, Wujiang and
      Jin, Mingyu and
      Du, Mengnan},
    editor = {Matusevych, Yevgen and
      Eryi{\u{g}}it, G{\"u}l{\c{s}}en and
      Aletras, Nikolaos},
    booktitle = {Proceedings of the 19th Conference of the {European} Chapter of the {Association} for {Computational} {Linguistics} (Volume 5: Industry Track)},
    month = mar,
    year = {2026},
    address = {Rabat, Morocco},
    publisher = {Association for Computational Linguistics},
    url = {https://preview.aclanthology.org/ingest-eacl/2026.eacl-industry.37/},
    pages = {483--495},
    isbn = {979-8-89176-384-5},
    internal-note = {NOTE(review): url points at the preview.aclanthology.org ingest-eacl staging site; confirm and switch to the canonical https://aclanthology.org/2026.eacl-industry.37/ once the volume is live},
    abstract = {Large language models (LLMs) have achieved remarkable progress, yet their internal mechanisms remain largely opaque, posing a significant challenge to their safe and reliable deployment. Sparse autoencoders (SAEs) have emerged as a promising tool for decomposing LLM representations into more interpretable features, but explaining the features captured by SAEs remains a challenging task. In this work, we propose SAGE (SAE Agentic Explainer), an agent-based framework that recasts feature interpretation from a passive, single-pass generation task into an active, explanation-driven process. SAGE implements a rigorous methodology by systematically formulating multiple explanations for each feature, designing targeted experiments to test them, and iteratively refining explanations based on empirical activation feedback. Experiments on features from SAEs of diverse language models demonstrate that SAGE produces explanations with significantly higher generative and predictive accuracy compared to state-of-the-art baselines.}
}
@comment{Scrape residue from the Anthology landing page, kept for provenance:
[SAGE: An Agentic Explainer Framework for Interpreting SAE Features in Language Models](https://preview.aclanthology.org/ingest-eacl/2026.eacl-industry.37/) (Han et al., EACL 2026)
ACL}