@inproceedings{fujita-etal-2026-legalrikai,
    title     = {{LegalRikai}: Open Benchmark {--} a Benchmark for Complex {Japanese} Corporate Legal Tasks},
    author    = {Fujita, Shogo and
                 Naraki, Yuji and
                 Zhu, Yiqing and
                 Mori, Shinsuke},
    editor    = {Piperidis, Stelios and
                 Bel, N{\'u}ria and
                 van den Heuvel, Henk and
                 Ide, Nancy and
                 Krek, Simon and
                 Toral, Antonio},
    booktitle = {Proceedings of the International Conference on Language Resources and Evaluation ({LREC} 2026)},
    month     = may,
    year      = {2026},
    address   = {Palma de Mallorca, Spain},
    publisher = {ELRA Language Resource Association},
    url       = {https://preview.aclanthology.org/ingest-lrec/2026.lrec-main.397/},
    pages     = {5055--5077},
    abstract  = {This paper introduces LegalRikai: Open Benchmark, a new benchmark comprising four complex tasks that emulate Japanese corporate legal practices. The benchmark was created by legal professionals under the supervision of an attorney. This benchmark has 100 samples that require long-form, structured outputs, and we evaluated them against multiple practical criteria. We conducted both human and automated evaluations using leading LLMs, including GPT-5, Gemini 2.5 Pro, and Claude Opus 4.1. Our human evaluation revealed that abstract instructions prompted unnecessary modifications, highlighting model weaknesses in document-level editing that were missed by conventional short-text tasks. Furthermore, our analysis reveals that automated evaluation aligns well with human judgment on criteria with clear linguistic grounding, and assessing structural consistency remains a challenge. The result demonstrates the utility of automated evaluation as a screening tool when expert availability is limited. We propose a dataset evaluation framework to promote more practice-oriented research in the legal domain.},
}
@comment{Markdown (Informal)}
@comment{[LegalRikai: Open Benchmark -- a Benchmark for Complex Japanese Corporate Legal Tasks](https://preview.aclanthology.org/ingest-lrec/2026.lrec-main.397/) (Fujita et al., LREC 2026), ACL Anthology markdown citation.}