@inproceedings{bartz-etal-2025-retrieving,
title = "Retrieving Argument Graphs Using Vision Transformers",
author = "Bartz, Kilian and
Lenz, Mirko and
Bergmann, Ralph",
editor = "Chistova, Elena and
Cimiano, Philipp and
Haddadan, Shohreh and
Lapesa, Gabriella and
Ruiz-Dolz, Ramon",
booktitle = "Proceedings of the 12th Argument mining Workshop",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/display_plenaries/2025.argmining-1.4/",
pages = "32--45",
ISBN = "979-8-89176-258-9",
abstract = "Through manual annotation or automated argument mining processes, arguments can be represented not only as text, but also in structured formats like graphs. When searching for relevant arguments, this additional information about the relationship between their elementary units allows for the formulation of fine-grained structural constraints by using graphs as queries. Then, a retrieval can be performed by computing the similarity between the query and all available arguments. Previous works employed Graph Edit Distance (GED) algorithms such as A* search to compute mappings between nodes and edges for determining the similarity, which is rather expensive. In this paper, we propose an alternative based on Vision Transformers where arguments are rendered as images to obtain dense embeddings. We propose multiple space-filling visualizations and evaluate the retrieval performance of the vision-based approach against an existing A* search-based method. We find that our technique runs orders of magnitude faster than A* search and scales well on larger argument graphs while achieving competitive results."
}
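
The abstract describes a retrieval pipeline: render each argument graph as an image, embed it with a pretrained Vision Transformer, and rank stored arguments by embedding similarity to the query. Below is a minimal, hypothetical sketch of that idea, not the authors' implementation: the backbone (`google/vit-base-patch16-224-in21k`), the plain spring-layout rendering (the paper uses space-filling visualizations), CLS-token pooling, and cosine similarity are all assumptions for illustration.

```python
import io

import matplotlib
matplotlib.use("Agg")  # headless rendering
import matplotlib.pyplot as plt
import networkx as nx
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTModel

# Hypothetical model choice; the paper's exact backbone may differ.
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
model = ViTModel.from_pretrained("google/vit-base-patch16-224-in21k").eval()

def render_graph(graph: nx.DiGraph) -> Image.Image:
    """Draw a graph to an in-memory RGB image (simple spring layout,
    standing in for the paper's space-filling visualizations)."""
    fig, ax = plt.subplots(figsize=(4, 4))
    nx.draw(graph, ax=ax, node_color="lightgray", with_labels=True)
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    plt.close(fig)
    buf.seek(0)
    return Image.open(buf).convert("RGB")

@torch.no_grad()
def embed(graph: nx.DiGraph) -> torch.Tensor:
    """Dense embedding of a rendered graph: the ViT CLS token."""
    inputs = processor(images=render_graph(graph), return_tensors="pt")
    return model(**inputs).last_hidden_state[:, 0]  # shape (1, 768)

# Retrieval: rank the case base by cosine similarity to the query graph.
query = nx.path_graph(4, create_using=nx.DiGraph)
case_base = {f"arg{n}": nx.path_graph(n, create_using=nx.DiGraph) for n in (3, 5, 8)}
q = embed(query)
scores = {name: torch.cosine_similarity(q, embed(g)).item()
          for name, g in case_base.items()}
for name, score in sorted(scores.items(), key=lambda kv: kv[1], reverse=True):
    print(f"{name}: {score:.3f}")
```

The speed advantage reported in the abstract follows from this structure: case-base embeddings can be precomputed once, so answering a query costs a single forward pass plus dot products, rather than an A* search over node and edge mappings per candidate graph.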
[Retrieving Argument Graphs Using Vision Transformers](https://preview.aclanthology.org/display_plenaries/2025.argmining-1.4/) (Bartz et al., ArgMining 2025)