@inproceedings{westhelle-moreira-2026-negnli,
    title     = "{NegNLI-BR}: A {Brazilian} {Portuguese} Benchmark for Negation in Natural Language Inference",
    author    = "Westhelle, Matheus and
      Moreira, Viviane",
    editor    = "Piperidis, Stelios and
      Bel, N{\'u}ria and
      van den Heuvel, Henk and
      Ide, Nancy and
      Krek, Simon and
      Toral, Antonio",
    booktitle = "Proceedings of the International Conference on Language Resources and Evaluation ({LREC} 2026)",
    month     = may,
    year      = "2026",
    address   = "Palma de Mallorca, Spain",
    publisher = "ELRA Language Resources Association",
    url       = "https://preview.aclanthology.org/ingest-lrec/2026.lrec-main.97/",
    pages     = "1226--1235",
    abstract  = "Recent studies have questioned the ability of Large Language Models (LLMs) to handle logical negation. We revisit this issue within the Natural Language Inference (NLI) task, specifically investigating whether modern LLMs can distinguish negations that alter logical entailment ({``}important{''}) from those that do not ({``}unimportant{''}). For this purpose, we introduce NegNLI-BR, a new benchmark dataset in Portuguese designed to exercise this distinction. We evaluate a range of recent open-source LLMs, comparing the performance of their base and post-trained versions. Furthermore, we employ a causal probe to measure the Average Treatment Effect of negation interventions on the internal representations of LLMs. Our findings show that many recent LLMs, including smaller variants, effectively handle negation. The causal analysis reveals that important negations induce a stable and significant effect on model representations, distinct from unimportant negations or neutral filler words. We also observe that post-training generally enhances this representational sensitivity, suggesting it refines the models' ability to encode the logical impact of negation."
}
@comment{Citation-format text pasted from the ACL Anthology page, kept for reference:
Markdown (Informal)
[NegNLI-BR: A Brazilian Portuguese Benchmark for Negation in Natural Language Inference](https://preview.aclanthology.org/ingest-lrec/2026.lrec-main.97/) (Westhelle & Moreira, LREC 2026)
ACL
}