@inproceedings{hayashi-2025-evaluating,
  title     = {Evaluating {LLM}s' Capability to Identify Lexical Semantic Equivalence: Probing with the {Word-in-Context} Task},
  author    = {Hayashi, Yoshihiko},
  editor    = {Rambow, Owen and
               Wanner, Leo and
               Apidianaki, Marianna and
               Al-Khalifa, Hend and
               Di Eugenio, Barbara and
               Schockaert, Steven},
  booktitle = {Proceedings of the 31st International Conference on Computational Linguistics},
  month     = jan,
  year      = {2025},
  address   = {Abu Dhabi, UAE},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.coling-main.466/},
  pages     = {6985--6998},
  abstract  = {This study proposes a method to evaluate the capability of large language models (LLMs) in identifying lexical semantic equivalence. The Word-in-Context (WiC) task, a benchmark designed to determine whether the meanings of a target word remain identical across different contexts, is employed as a probing task. Experiments are conducted with several LLMs, including proprietary GPT models and open-source models, using zero-shot prompting with adjectives that represent varying levels of semantic equivalence (e.g., {\textquotedblleft}the same{\textquotedblright}) or inequivalence (e.g., {\textquotedblleft}different{\textquotedblright}). The fundamental capability to identify lexical semantic equivalence in context is measured using standard accuracy metrics. Consistency across different levels of semantic equivalence is assessed via rank correlation with the expected canonical ranking of precision and recall, reflecting anticipated trends in performance across prompts. The proposed method demonstrates its effectiveness, highlighting the superior capability of GPT-4o, as it consistently outperforms other explored LLMs. Analysis of the WiC dataset, the discriminative properties of adjectives (i.e., their ability to differentiate between levels of semantic equivalence), and linguistic patterns in erroneous cases offer insights into the LLM's capability and sensitivity. These findings could inform improvements in WiC task performance, although performance enhancement is not the primary focus of this study.}
}
Markdown (Informal)
[Evaluating LLMs’ Capability to Identify Lexical Semantic Equivalence: Probing with the Word-in-Context Task](https://aclanthology.org/2025.coling-main.466/) (Hayashi, COLING 2025)
ACL