@inproceedings{rezaei-blanco-2025-making,
  title     = {Making Language Models Robust Against Negation},
  author    = {Rezaei, MohammadHossein and
               Blanco, Eduardo},
  editor    = {Chiruzzo, Luis and
               Ritter, Alan and
               Wang, Lu},
  booktitle = {Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)},
  month     = apr,
  year      = {2025},
  address   = {Albuquerque, New Mexico},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.naacl-long.413/},
  pages     = {8123--8142},
  isbn      = {979-8-89176-189-6},
  abstract  = {Negation has been a long-standing challenge for language models. Previous studies have shown that they struggle with negation in many natural language understanding tasks. In this work, we propose a self-supervised method to make language models more robust against negation. We introduce a novel task, Next Sentence Polarity Prediction (NSPP), and a variation of the Next Sentence Prediction (NSP) task. We show that BERT and RoBERTa further pre-trained on our tasks outperform the off-the-shelf versions on nine negation-related benchmarks. Most notably, our pre-training tasks yield between 1.8{\%} and 9.1{\%} improvement on CondaQA, a large question-answering corpus requiring reasoning over negation.},
}
Markdown (Informal)
[Making Language Models Robust Against Negation](https://aclanthology.org/2025.naacl-long.413/) (Rezaei & Blanco, NAACL 2025)
ACL
- MohammadHossein Rezaei and Eduardo Blanco. 2025. Making Language Models Robust Against Negation. In Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 8123–8142, Albuquerque, New Mexico. Association for Computational Linguistics.