@inproceedings{wang-etal-2025-v,
title = "{V}-{SEAM}: Visual Semantic Editing and Attention Modulating for Causal Interpretability of Vision-Language Models",
author = "Wang, Qidong and
Hu, Junjie and
Jiang, Ming",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.emnlp-main.880/",
pages = "17407--17431",
ISBN = "979-8-89176-332-6",
    abstract = "Recent advances in causal interpretability have extended from language models to vision-language models (VLMs), seeking to reveal their internal mechanisms through input interventions. While textual interventions often target semantics, visual interventions typically rely on coarse pixel-level perturbations, limiting semantic insight into multimodal integration. In this study, we introduce V-SEAM, a novel framework that combines Visual Semantic Editing and Attention Modulating for causal interpretation of VLMs. V-SEAM enables concept-level visual manipulations and identifies attention heads with positive or negative contributions to predictions across three semantic levels: objects, attributes, and relationships. We observe that positive heads are often shared within the same semantic level but vary across levels, while negative heads tend to generalize broadly. Finally, we introduce an automatic method to modulate key head embeddings, demonstrating enhanced performance for both LLaVA and InstructBLIP across three diverse VQA benchmarks. Our data and code are released at: https://github.com/petergit1/V-SEAM."
}
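
For readers who want a concrete picture of what "attention modulating" could mean in practice, here is a minimal sketch, not the authors' released implementation (see the repository linked in the abstract for that). It assumes per-head attention outputs are available as an array of shape (num_heads, seq_len, head_dim) and that modulation amounts to rescaling heads identified as helpful or harmful; the head indices and scaling factors below are hypothetical.

```python
# Illustrative sketch of attention-head modulation, assuming "modulating"
# means rescaling selected per-head outputs before they are concatenated
# and projected. Head indices and factors are hypothetical examples.
import numpy as np

def modulate_heads(head_outputs, positive_heads, negative_heads,
                   boost=1.5, damp=0.5):
    """Rescale per-head attention outputs.

    head_outputs: (num_heads, seq_len, head_dim) array of per-head outputs.
    positive_heads / negative_heads: indices of heads identified (e.g., via
        causal interventions) as helping or hurting predictions.
    boost / damp: illustrative scaling factors.
    """
    modulated = head_outputs.copy()
    modulated[positive_heads] *= boost   # amplify helpful heads
    modulated[negative_heads] *= damp    # suppress harmful heads
    return modulated

# Toy usage: 8 heads, 4 tokens, 16-dimensional head outputs.
rng = np.random.default_rng(0)
outputs = rng.standard_normal((8, 4, 16))
modulated = modulate_heads(outputs, positive_heads=[1, 5], negative_heads=[3])
print(modulated.shape)  # (8, 4, 16)
```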