@inproceedings{zhukova-etal-2025-contrastive,
title = "Contrastive Learning Using Graph Embeddings for Domain Adaptation of Language Models in the Process Industry",
author = "Zhukova, Anastasia and
Luehrs, Jonas and
Matt, Christian and
Gipp, Bela",
editor = "Potdar, Saloni and
Rojas-Barahona, Lina and
Montella, Sebastien",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing: Industry Track",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.emnlp-industry.103/",
pages = "1472--1484",
isbn = "979-8-89176-333-3",
abstract = "Recent trends in NLP utilize knowledge graphs (KGs) to enhance pretrained language models by incorporating additional knowledge from the graph structures to learn domain-specific terminology or relationships between documents that might otherwise be overlooked. This paper explores how SciNCL, a graph-aware neighborhood contrastive learning methodology originally designed for scientific publications, can be applied to the process industry domain, where text logs contain crucial information about daily operations and are often structured as sparse KGs. Our experiments demonstrate that language models fine-tuned with triplets derived from graph embeddings (GE) outperform a state-of-the-art mE5-large text encoder by 9.8-14.3{\%} (5.45-7.96p) on the proprietary process industry text embedding benchmark (PITEB) while having 3 times fewer parameters."
}
Markdown (Informal)
[Contrastive Learning Using Graph Embeddings for Domain Adaptation of Language Models in the Process Industry](https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-industry.103/) (Zhukova et al., EMNLP 2025)
ACL