@inproceedings{peng-etal-2022-discovering,
title = "Discovering Financial Hypernyms by Prompting Masked Language Models",
author = "Peng, Bo and
Chersoni, Emmanuele and
Hsu, Yu-Yin and
Huang, Chu-Ren",
editor = "El-Haj, Mahmoud and
Rayson, Paul and
Zmandar, Nadhem",
booktitle = "Proceedings of the 4th Financial Narrative Processing Workshop @LREC2022",
month = jun,
year = "2022",
address = "Marseille, France",
publisher = "European Language Resources Association",
url = "https://aclanthology.org/2022.fnp-1.2",
pages = "10--16",
    abstract = "With the rising popularity of Transformer-based language models, several studies have tried to exploit their masked language modeling capabilities to automatically extract relational linguistic knowledge, although this kind of research has rarely investigated semantic relations in specialized domains. The present study aims at testing a general-domain and a domain-adapted Transformer model on two datasets of financial term-hypernym pairs using the prompt methodology. Our results show that differences in prompts have a critical impact on the models{'} performance, and that domain adaptation on financial text generally improves the models{'} capacity to associate the target terms with the right hypernyms, although the most successful models are those retaining a general-domain vocabulary.",
}