@inproceedings{singh-etal-2023-nlms,
title = "{NLM}s: Augmenting Negation in Language Models",
author = "Singh, Rituraj and
Kumar, Rahul and
Sridhar, Vivek",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2023.findings-emnlp.873/",
doi = "10.18653/v1/2023.findings-emnlp.873",
pages = "13104--13116",
    abstract = "Negation is a fundamental component of natural language that reverses the semantic meaning of a sentence. It plays an extremely important role across a wide range of applications, yet it is underrepresented in pre-trained language models (LMs), often resulting in wrong inferences. In this work, we try to improve the underlying understanding of negation in pre-trained LMs. To augment negation understanding, we propose a language model objective with a weighted cross-entropy loss and elastic weight consolidation regularization. We reduce the mean top-1 error rate on the negated LAMA dataset to 1.1{\%} for BERT-base, 0.78{\%} for BERT-large, 3.74{\%} for RoBERTa-base, and 0.01{\%} for RoBERTa-large. This reduces the BERT error rate by a margin of 8{\%} and also outperforms existing negation models. We also provide empirical evidence that negation-augmented models outperform the classical models on original as well as negation benchmarks for natural language inference tasks."
}
Markdown (Informal):
[NLMs: Augmenting Negation in Language Models](https://preview.aclanthology.org/add-emnlp-2024-awards/2023.findings-emnlp.873/) (Singh et al., Findings 2023)

ACL:
Rituraj Singh, Rahul Kumar, and Vivek Sridhar. 2023. NLMs: Augmenting Negation in Language Models. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 13104–13116, Singapore. Association for Computational Linguistics.
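
The abstract names two ingredients: a token-weighted cross-entropy objective and elastic weight consolidation (EWC) regularization. As a rough illustration only, the PyTorch-style sketch below shows one way such a combined loss could be assembled; the weighting scheme, the Fisher estimate, and the names `token_weights`, `ref_params`, `fisher`, and `lam` are assumptions for the sketch, not the paper's implementation.

```python
# Illustrative sketch (not the paper's code): token-weighted masked-LM
# cross-entropy plus an EWC penalty anchoring parameters to the pre-trained model.
import torch
import torch.nn.functional as F

def weighted_mlm_loss(logits, labels, token_weights, ignore_index=-100):
    # logits: (batch, seq, vocab); labels: (batch, seq), ignore_index on unmasked positions;
    # token_weights: (batch, seq), e.g. larger weights on negation-related tokens (assumed scheme).
    per_token = F.cross_entropy(
        logits.view(-1, logits.size(-1)),
        labels.view(-1),
        ignore_index=ignore_index,
        reduction="none",
    )
    weights = token_weights.view(-1)
    mask = (labels.view(-1) != ignore_index).float()
    return (per_token * weights * mask).sum() / mask.sum().clamp(min=1.0)

def ewc_penalty(model, ref_params, fisher, lam=1.0):
    # Quadratic penalty weighted by a diagonal Fisher-information estimate,
    # keeping fine-tuned parameters close to the pre-trained values ref_params.
    penalty = torch.zeros((), device=next(model.parameters()).device)
    for name, p in model.named_parameters():
        if name in fisher:
            penalty = penalty + (fisher[name] * (p - ref_params[name]) ** 2).sum()
    return 0.5 * lam * penalty

# total_loss = weighted_mlm_loss(logits, labels, token_weights) \
#              + ewc_penalty(model, ref_params, fisher, lam=1.0)
```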