@inproceedings{tan-jiang-2021-bert,
    title     = {Does {BERT} Understand Idioms? A Probing-Based Empirical Study of {BERT} Encodings of Idioms},
    author    = {Tan, Minghuan and
                 Jiang, Jing},
    editor    = {Mitkov, Ruslan and
                 Angelova, Galia},
    booktitle = {Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2021)},
    month     = sep,
    year      = {2021},
    address   = {Held Online},
    publisher = {INCOMA Ltd.},
    url       = {https://aclanthology.org/2021.ranlp-1.156/},
    pages     = {1397--1407},
    abstract  = {Understanding idioms is important in NLP. In this paper, we study to what extent pre-trained BERT model can encode the meaning of a potentially idiomatic expression (PIE) in a certain context. We make use of a few existing datasets and perform two probing tasks: PIE usage classification and idiom paraphrase identification. Our experiment results suggest that BERT indeed can separate the literal and idiomatic usages of a PIE with high accuracy. It is also able to encode the idiomatic meaning of a PIE to some extent.},
}
Markdown (Informal)
[Does BERT Understand Idioms? A Probing-Based Empirical Study of BERT Encodings of Idioms](https://aclanthology.org/2021.ranlp-1.156/) (Tan & Jiang, RANLP 2021)
ACL