@inproceedings{kletz-etal-2025-polarity,
title = "Polarity inversion operators in {PLM}",
author = "Kletz, David and
Amsili, Pascal and
Candito, Marie",
editor = "Boleda, Gemma and
Roth, Michael",
booktitle = "Proceedings of the 29th Conference on Computational Natural Language Learning",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/acl25-workshop-ingestion/2025.conll-1.20/",
pages = "312--322",
ISBN = "979-8-89176-271-8",
abstract = "From a linguistic perspective, negation is a unique and inherently compositional operator. In this study, we investigate whether the bert-large-cased Pretrained Language Model (PLM) properly encodes this compositional aspect of negation when embedding a token that falls within the scope of negation.To explore this, we train two external Multi-Layer Perceptrons to modify contextual embeddings in a controlled manner. The goal is to reverse the polarity information encoded in the embedding while preserving all other token-related information. The first MLP, called the Negator, transforms a negative polarity into a positive one, while the second, the Affirmator, performs the reverse transformation.We then conduct a series of evaluations to assess the effectiveness of these operators. Our results indicate that while the Negator/Affirmator is functional, it only partially simulates the negation operator. Specifically, applying it recursively does not allow us to recover the original polarity, suggesting an incomplete representation of negation within the PLM{'}s embeddings.In addition, a downstream evaluation on the Negated LAMA dataset reveals that the modifications introduced by the Negator/Affirmator lead to a slight improvement in the model{'}s ability to account for negation in its predictions. However, applying the Negator/Affirmator recursively results in degraded representations, further reinforcing the idea that negation is not fully compositional within PLM embeddings."
}
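
The sketch below illustrates the kind of external operator the abstract describes: a small MLP applied to a bert-large-cased contextual embedding to flip the polarity information it encodes, with a second application to test recursivity. It is not the authors' implementation; the class name `PolarityInverter`, the hidden layer size, the example sentence, and the token selection are assumptions made for illustration, and the operator here is untrained.

```python
# Illustrative sketch only, not the authors' implementation: a hypothetical
# "Negator"-style MLP that takes a bert-large-cased contextual embedding and
# returns a modified embedding intended to carry the opposite polarity.
# Class name, layer sizes, example sentence, and token selection are assumptions.
import torch
import torch.nn as nn
from transformers import AutoModel, AutoTokenizer


class PolarityInverter(nn.Module):
    """Hypothetical external MLP acting on 1024-dim bert-large-cased embeddings."""

    def __init__(self, dim: int = 1024, hidden: int = 2048):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, dim),
        )

    def forward(self, emb: torch.Tensor) -> torch.Tensor:
        # In the paper's setup such an operator is trained; here it is untrained
        # and only shows where it would plug into the pipeline.
        return self.net(emb)


tokenizer = AutoTokenizer.from_pretrained("bert-large-cased")
encoder = AutoModel.from_pretrained("bert-large-cased")
negator = PolarityInverter()

sentence = "The cake is not ready."
inputs = tokenizer(sentence, return_tensors="pt")

with torch.no_grad():
    hidden = encoder(**inputs).last_hidden_state  # shape: (1, seq_len, 1024)

    # Pick a token in the scope of negation ("ready" is assumed to be one WordPiece).
    tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].tolist())
    idx = tokens.index("ready")

    flipped = negator(hidden[0, idx])  # one application: invert polarity
    twice = negator(flipped)           # recursive application, as tested in the paper

# If the operator were a true involution, `twice` would match the original embedding;
# the paper reports that this recovery is only partial.
print(torch.nn.functional.cosine_similarity(hidden[0, idx], twice, dim=0).item())
```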