@inproceedings{li-etal-2022-improving-bilingual,
title = "Improving Bilingual Lexicon Induction with Cross-Encoder Reranking",
author = "Li, Yaoyiran and
Liu, Fangyu and
Vuli{\'c}, Ivan and
Korhonen, Anna",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-emnlp.302/",
doi = "10.18653/v1/2022.findings-emnlp.302",
pages = "4100--4116",
    abstract = "Bilingual lexicon induction (BLI) with limited bilingual supervision is a crucial yet challenging task in multilingual NLP. Current state-of-the-art BLI methods rely on the induction of cross-lingual word embeddings (CLWEs) to capture cross-lingual word similarities; such CLWEs are obtained 1) via traditional static models (e.g., VecMap), or 2) by extracting type-level CLWEs from multilingual pretrained language models (mPLMs), or 3) through combining the former two options. In this work, we propose a novel semi-supervised post-hoc reranking method termed BLICEr (BLI with Cross-Encoder Reranking), applicable to any precalculated CLWE space, which improves their BLI capability. The key idea is to 'extract' cross-lingual lexical knowledge from mPLMs, and then combine it with the original CLWEs. This crucial step is done via 1) creating a word similarity dataset, comprising positive word pairs (i.e., true translations) and hard negative pairs induced from the original CLWE space, and then 2) fine-tuning an mPLM (e.g., mBERT or XLM-R) in a cross-encoder manner to predict the similarity scores. At inference, we 3) combine the similarity score from the original CLWE space with the score from the BLI-tuned cross-encoder. BLICEr establishes new state-of-the-art results on two standard BLI benchmarks spanning a wide spectrum of diverse languages: it substantially outperforms a series of strong baselines across the board. We also validate the robustness of BLICEr with different CLWEs."
}
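
A minimal Python sketch of the two steps the abstract describes: mining hard negatives from a precomputed CLWE space to build cross-encoder training pairs, and interpolating the CLWE similarity with the cross-encoder score at inference. All names (clwe_topk, clwe_sim, xenc_score) and the weight lam are illustrative assumptions, not the paper's exact formulation.

def mine_hard_negatives(src_word, gold_tgt, clwe_topk):
    """Build (src, tgt, label) training pairs: the gold translation is a
    positive; the CLWE space's nearest (but wrong) neighbours serve as
    hard negatives."""
    pairs = [(src_word, gold_tgt, 1.0)]
    for tgt in clwe_topk(src_word):  # nearest neighbours in the CLWE space
        if tgt != gold_tgt:
            pairs.append((src_word, tgt, 0.0))
    return pairs

def rerank(src_word, candidates, clwe_sim, xenc_score, lam=0.5):
    """Post-hoc reranking at inference: linearly interpolate the original
    CLWE similarity with the BLI-tuned cross-encoder's predicted
    similarity, and return candidates sorted highest-first."""
    combined = {
        tgt: lam * clwe_sim(src_word, tgt)
             + (1.0 - lam) * xenc_score(src_word, tgt)
        for tgt in candidates
    }
    return sorted(combined, key=combined.get, reverse=True)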