@inproceedings{ivan-etal-2025-evaluating,
title = "Evaluating Pretrained Causal Language Models for Synonymy",
author = "Ivan, Ioana and
Ramisch, Carlos and
Nasr, Alexis",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/corrections-2025-08/2025.findings-acl.649/",
doi = "10.18653/v1/2025.findings-acl.649",
pages = "12533--12551",
ISBN = "979-8-89176-256-5",
abstract = "The scaling of causal language models in size and training data enabled them to tackle increasingly complex tasks. Despite the development of sophisticated tests to reveal their new capabilities, the underlying basis of these complex skills remains unclear. We argue that complex skills might be explained using simpler ones, represented by linguistic concepts. As an initial step in exploring this hypothesis, we focus on the lexical-semantic concept of synonymy, laying the groundwork for research into its relationship with more complex skills. We develop a comprehensive test suite to assess various aspects of synonymy under different conditions, and evaluate causal open-source models ranging up to 10 billion parameters. We find that these models effectively recognize synonymy but struggle to generate synonyms when prompted with relevant context."
}