@inproceedings{marecek-rosa-2018-extracting,
    title = "Extracting Syntactic Trees from {Transformer} Encoder Self-Attentions",
    author = "Mare{\v{c}}ek, David and
      Rosa, Rudolf",
    editor = "Linzen, Tal and
      Chrupa{\l}a, Grzegorz and
      Alishahi, Afra",
    booktitle = "Proceedings of the 2018 {EMNLP} Workshop {B}lackbox{NLP}: Analyzing and Interpreting Neural Networks for {NLP}",
    month = nov,
    year = "2018",
    address = "Brussels, Belgium",
    publisher = "Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/ingest_wac_2008/W18-5444/",
    doi = "10.18653/v1/W18-5444",
    pages = "347--349",
    abstract = "This is a work in progress about extracting the sentence tree structures from the encoder's self-attention weights, when translating into another language using the Transformer neural network architecture. We visualize the structures and discuss their characteristics with respect to the existing syntactic theories and annotations."
}
@comment{
Markdown (Informal)
[Extracting Syntactic Trees from Transformer Encoder Self-Attentions](https://preview.aclanthology.org/ingest_wac_2008/W18-5444/) (Mareček & Rosa, EMNLP 2018)
ACL
}