@inproceedings{kasner-dusek-2026-animatedllm-explaining,
  title     = {{AnimatedLLM}: Explaining {LLM}s with Interactive Visualizations},
  author    = {Kasner, Zden{\v{e}}k and
               Dusek, Ondrej},
  editor    = {A{\ss}enmacher, Matthias and
               Biester, Laura and
               Borg, Claudia and
               Kov{\'a}cs, Gy{\"o}rgy and
               Mieskes, Margot and
               Serrano, Sofia},
  booktitle = {Proceedings of the Seventh Workshop on Teaching Natural Language Processing ({TeachNLP} 2026)},
  month     = mar,
  year      = {2026},
  address   = {Rabat, Morocco},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/ingest-eacl/2026.teachingnlp-1.1/},
  pages     = {1--6},
  isbn      = {979-8-89176-375-3},
  abstract  = {Large language models (LLMs) are becoming central to natural language processing education, yet materials showing their mechanics are sparse. We present AnimatedLLM, an interactive web application that provides step-by-step visualizations of a Transformer language model. AnimatedLLM runs entirely in the browser, using pre-computed traces of open LLMs applied on manually curated inputs. The application is available at https://animatedllm.github.io, both as a teaching aid and for self-educational purposes.},
  internal-note = {NOTE(review): url points at the preview.aclanthology.org ingest mirror -- replace with the canonical aclanthology.org URL (and add doi) once the paper is formally published; verify author diacritics against the published version},
}
@comment{Informal markdown citation (copy-paste residue from the ACL Anthology page):
[AnimatedLLM: Explaining LLMs with Interactive Visualizations](https://preview.aclanthology.org/ingest-eacl/2026.teachingnlp-1.1/) (Kasner & Dusek, TeachingNLP 2026)
ACL}