@inproceedings{liu-etal-2024-mquine,
    title     = {{MQuinE}: a Cure for ``{Z}-paradox'' in Knowledge Graph Embedding},
    author    = {Liu, Yang and
      Fang, Huang and
      Cai, Yunfeng and
      Sun, Mingming},
    editor    = {Al-Onaizan, Yaser and
      Bansal, Mohit and
      Chen, Yun-Nung},
    booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing},
    month     = nov,
    year      = {2024},
    address   = {Miami, Florida, USA},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2024.emnlp-main.549/},
    doi       = {10.18653/v1/2024.emnlp-main.549},
    pages     = {9837--9850},
    abstract  = {Knowledge graph embedding (KGE) models achieved state-of-the-art results on many knowledge graph tasks including link prediction and information retrieval. Despite the superior performance of KGE models in practice, we discover a deficiency in the expressiveness of some popular existing KGE models called \textit{Z-paradox}. Motivated by the existence of Z-paradox, we propose a new KGE model called \textit{MQuinE} that does not suffer from Z-paradox while preserves strong expressiveness to model various relation patterns including symmetric/asymmetric, inverse, 1-N/N-1/N-N, and composition relations with theoretical justification. Experiments on real-world knowledge bases indicate that Z-paradox indeed degrades the performance of existing KGE models, and can cause more than 20{\%} accuracy drop on some challenging test samples. Our experiments further demonstrate that MQuinE can mitigate the negative impact of Z-paradox and outperform existing KGE models by a visible margin on link prediction tasks.},
}
Markdown (Informal)
[MQuinE: a Cure for “Z-paradox” in Knowledge Graph Embedding](https://aclanthology.org/2024.emnlp-main.549/) (Liu et al., EMNLP 2024)
ACL