@inproceedings{gangal-hovy-2020-bertering,
  author    = {Gangal, Varun and
               Hovy, Eduard},
  title     = {{BERT}ering {RAMS}: What and How Much does {BERT} Already Know About Event Arguments? - A Study on the {RAMS} Dataset},
  editor    = {Alishahi, Afra and
               Belinkov, Yonatan and
               Chrupa{\l}a, Grzegorz and
               Hupkes, Dieuwke and
               Pinter, Yuval and
               Sajjad, Hassan},
  booktitle = {Proceedings of the Third {BlackboxNLP} Workshop on Analyzing and Interpreting Neural Networks for {NLP}},
  month     = nov,
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2020.blackboxnlp-1.1},
  doi       = {10.18653/v1/2020.blackboxnlp-1.1},
  pages     = {1--10},
  abstract  = {Using the attention map based probing framework from (Clark et al., 2019), we observe that, on the RAMS dataset (Ebner et al., 2020), BERT{'}s attention heads have modest but well above-chance ability to spot event arguments sans any training or domain finetuning, varying from a low of 17.77{\%} for Place to a high of 51.61{\%} for Artifact. Next, we find that linear combinations of these heads, estimated with approx. 11{\%} of available total event argument detection supervision, can push performance well higher for some roles {---} highest two being Victim (68.29{\%} Accuracy) and Artifact (58.82{\%} Accuracy). Furthermore, we investigate how well our methods do for cross-sentence event arguments. We propose a procedure to isolate {``}best heads{''} for cross-sentence argument detection separately of those for intra-sentence arguments. The heads thus estimated have superior cross-sentence performance compared to their jointly estimated equivalents, albeit only under the unrealistic assumption that we already know the argument is present in another sentence. Lastly, we seek to isolate to what extent our numbers stem from lexical frequency based associations between gold arguments and roles. We propose NONCE, a scheme to create adversarial test examples by replacing gold arguments with randomly generated {``}nonce{''} words. We find that learnt linear combinations are robust to NONCE, though individual best heads can be more sensitive.},
}
@comment{
Markdown (Informal)
[BERTering RAMS: What and How Much does BERT Already Know About Event Arguments? - A Study on the RAMS Dataset](https://aclanthology.org/2020.blackboxnlp-1.1) (Gangal & Hovy, BlackboxNLP 2020)
ACL
}