@inproceedings{mikami-etal-2025-large,
title = "Can Large Language Models Robustly Perform Natural Language Inference for {J}apanese Comparatives?",
author = "Mikami, Yosuke and
Matsuoka, Daiki and
Yanaka, Hitomi",
editor = "Evang, Kilian and
Kallmeyer, Laura and
Pogodalla, Sylvain",
booktitle = "Proceedings of the 16th International Conference on Computational Semantics",
month = sep,
year = "2025",
    address = "D{\"u}sseldorf, Germany",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/iwcs-25-ingestion/2025.iwcs-1.13/",
pages = "137--146",
ISBN = "979-8-89176-316-6",
    abstract = "Large Language Models (LLMs) perform remarkably well in Natural Language Inference (NLI). However, NLI involving numerical and logical expressions remains challenging. Comparatives are a key linguistic phenomenon related to such inference, but the robustness of LLMs in handling them, especially in languages that are not dominant in the models' training data, such as Japanese, has not been sufficiently explored. To address this gap, we construct a Japanese NLI dataset that focuses on comparatives and evaluate various LLMs in zero-shot and few-shot settings. Our results show that the performance of the models is sensitive to the prompt formats in the zero-shot setting and influenced by the gold labels in the few-shot examples. The LLMs also struggle to handle linguistic phenomena unique to Japanese. Furthermore, we observe that prompts containing logical semantic representations help the models predict the correct labels for inference problems that they struggle to solve even with few-shot examples."
}
Markdown (Informal)
[Can Large Language Models Robustly Perform Natural Language Inference for Japanese Comparatives?](https://preview.aclanthology.org/iwcs-25-ingestion/2025.iwcs-1.13/) (Mikami et al., IWCS 2025)