@inproceedings{wu-etal-2020-perturbed,
title = "Perturbed Masking: Parameter-free Probing for Analyzing and Interpreting {BERT}",
author = "Wu, Zhiyong and
Chen, Yun and
Kao, Ben and
Liu, Qun",
editor = "Jurafsky, Dan and
Chai, Joyce and
Schluter, Natalie and
Tetreault, Joel",
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2020.acl-main.383/",
doi = "10.18653/v1/2020.acl-main.383",
pages = "4166--4176",
abstract = "By introducing a small set of additional parameters, a \textit{probe} learns to solve specific linguistic tasks (e.g., dependency parsing) in a supervised manner using feature representations (e.g., contextualized embeddings). The effectiveness of such \textit{probing} tasks is taken as evidence that the pre-trained model encodes linguistic knowledge. However, this approach of evaluating a language model is undermined by the uncertainty of the amount of knowledge that is learned by the probe itself. Complementary to those works, we propose a parameter-free probing technique for analyzing pre-trained language models (e.g., BERT). Our method does not require direct supervision from the probing tasks, nor do we introduce additional parameters to the probing process. Our experiments on BERT show that syntactic trees recovered from BERT using our method are significantly better than linguistically-uninformed baselines. We further feed the empirically induced dependency structures into a downstream sentiment classification task and find its improvement compatible with or even superior to a human-designed dependency schema."
}
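
As a rough illustration of the parameter-free probing the abstract describes, below is a minimal sketch of computing a perturbed-masking impact matrix with BERT. This is not the authors' released implementation: the choice of HuggingFace `transformers`, the `bert-base-uncased` checkpoint, and the use of Euclidean distance between hidden states are assumptions for illustration, and the subsequent tree-decoding step (extracting a dependency structure from the matrix) is omitted.

```python
# Sketch: impact f(i, j) measures how much additionally masking token x_j
# changes the representation of an already-masked token x_i.
# Assumes HuggingFace transformers; hyperparameters are illustrative only.
import torch
from transformers import BertTokenizer, BertModel

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")
model.eval()


def impact_matrix(sentence: str) -> torch.Tensor:
    enc = tokenizer(sentence, return_tensors="pt")
    ids = enc["input_ids"][0]
    n = ids.size(0)
    mask_id = tokenizer.mask_token_id
    impact = torch.zeros(n, n)
    with torch.no_grad():
        for i in range(1, n - 1):                 # skip [CLS] and [SEP]
            one_mask = ids.clone()
            one_mask[i] = mask_id                 # mask x_i
            h_i = model(one_mask.unsqueeze(0)).last_hidden_state[0, i]
            for j in range(1, n - 1):
                if i == j:
                    continue
                two_mask = one_mask.clone()
                two_mask[j] = mask_id             # additionally mask x_j
                h_ij = model(two_mask.unsqueeze(0)).last_hidden_state[0, i]
                # Larger distance = removing x_j perturbs x_i more.
                impact[i, j] = torch.dist(h_i, h_ij)
    return impact


print(impact_matrix("The quick brown fox jumps over the lazy dog."))
```

In the paper, a syntactic tree is then induced from this matrix with a standard decoding algorithm; the sketch stops at the matrix itself.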